code
stringlengths 1.03k
250k
| repo_name
stringlengths 7
70
| path
stringlengths 4
177
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 1.03k
250k
|
---|---|---|---|---|---|
/*
* Read and write JSON.
*
* Copyright (c) 2014 Marko Kreen
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <usual/json.h>
#include <usual/cxextra.h>
#include <usual/cbtree.h>
#include <usual/misc.h>
#include <usual/utf8.h>
#include <usual/ctype.h>
#include <usual/bytemap.h>
#include <usual/string.h>
#include <math.h>
#define TYPE_BITS 3
#define TYPE_MASK ((1 << TYPE_BITS) - 1)
#define UNATTACHED ((struct JsonValue *)(1 << TYPE_BITS))
#define JSON_MAX_KEY (1024*1024)
#define NUMBER_BUF 100
#define JSON_MAXINT ((1LL << 53) - 1)
#define JSON_MININT (-(1LL << 53) + 1)
/*
 * Common struct for all JSON values.
 *
 * String/list/dict payload lives in extra memory allocated directly
 * after this struct (see get_extra()).
 */
struct JsonValue {
	/* actual value for simple types */
	union {
		double v_float;		/* float */
		int64_t v_int;		/* int */
		bool v_bool;		/* bool */
		size_t v_size;		/* str/list/dict */
	} u;
	/* pointer to next elem and type in low bits */
	uintptr_t v_next_and_type;
};
/*
 * List container.
 */
struct ValueList {
	struct JsonValue *first;
	struct JsonValue *last;
	/* lazily built random-access index, NULL until needed/invalidated */
	struct JsonValue **array;
};
/*
 * Extra data for list/dict.
 */
struct JsonContainer {
	/* parent container */
	struct JsonValue *c_parent;
	/* main context for child alloc */
	struct JsonContext *c_ctx;
	/* child elements */
	union {
		struct CBTree *c_dict;
		struct ValueList c_list;
	} u;
};
/* dict needs only the tree pointer from the union; list needs it all */
#define DICT_EXTRA (offsetof(struct JsonContainer, u.c_dict) + sizeof(struct CBTree *))
#define LIST_EXTRA (sizeof(struct JsonContainer))
/*
 * Allocation context.
 */
struct JsonContext {
	CxMem *pool;			/* arena all values are allocated from */
	unsigned int options;		/* JSON_PARSE_* flags */
	/* parse state */
	struct JsonValue *parent;	/* container currently being filled */
	struct JsonValue *cur_key;	/* dict key waiting for its value */
	struct JsonValue *top;		/* root of the parsed tree */
	const char *lasterr;		/* points at errbuf after first error */
	char errbuf[128];
	int64_t linenr;			/* current line, for error messages */
};
/* State carried through the render callbacks. */
struct RenderState {
	struct MBuf *dst;
	unsigned int options;
};
/*
 * Parser states
 */
enum ParseState {
	S_INITIAL_VALUE = 1,	/* 0 is reserved as the error state */
	S_LIST_VALUE,
	S_LIST_VALUE_OR_CLOSE,
	S_LIST_COMMA_OR_CLOSE,
	S_DICT_KEY,
	S_DICT_KEY_OR_CLOSE,
	S_DICT_COLON,
	S_DICT_VALUE,
	S_DICT_COMMA_OR_CLOSE,
	S_PARENT,		/* container closed, resume in parent */
	S_DONE,
	MAX_STATES,
};
/*
 * Tokens that change state.
 */
enum TokenTypes {
	T_STRING,
	T_OTHER,	/* number, or true/false/null */
	T_COMMA,
	T_COLON,
	T_OPEN_DICT,
	T_OPEN_LIST,
	T_CLOSE_DICT,
	T_CLOSE_LIST,
	MAX_TOKENS
};
/*
 * 4-byte ints for small string tokens, compared with one memcpy+load.
 */
#define C_NULL FOURCC('n','u','l','l')
#define C_TRUE FOURCC('t','r','u','e')
#define C_ALSE FOURCC('a','l','s','e')	/* tail of "false", 'f' consumed separately */
/*
 * Signature for render functions.
 */
typedef bool (*render_func_t)(struct RenderState *rs, struct JsonValue *jv);
static bool render_any(struct RenderState *rs, struct JsonValue *jv);
/*
* Header manipulation
*/
static inline enum JsonValueType get_type(struct JsonValue *jv)
{
return jv->v_next_and_type & TYPE_MASK;
}
static inline bool has_type(struct JsonValue *jv, enum JsonValueType type)
{
if (!jv)
return false;
return get_type(jv) == type;
}
static inline struct JsonValue *get_next(struct JsonValue *jv)
{
return (struct JsonValue *)(jv->v_next_and_type & ~(uintptr_t)TYPE_MASK);
}
static inline void set_next(struct JsonValue *jv, struct JsonValue *next)
{
jv->v_next_and_type = (uintptr_t)next | get_type(jv);
}
static inline bool is_unattached(struct JsonValue *jv)
{
return get_next(jv) == UNATTACHED;
}
static inline void *get_extra(struct JsonValue *jv)
{
return (void *)(jv + 1);
}
static inline char *get_cstring(struct JsonValue *jv)
{
enum JsonValueType type = get_type(jv);
if (type != JSON_STRING)
return NULL;
return get_extra(jv);
}
/*
 * Collection header manipulation.
 */

/* Container header, or NULL when jv is not a dict/list. */
static inline struct JsonContainer *get_container(struct JsonValue *jv)
{
	switch (get_type(jv)) {
	case JSON_DICT:
	case JSON_LIST:
		return get_extra(jv);
	default:
		return NULL;
	}
}

/* Record enclosing container; no-op for non-container values. */
static inline void set_parent(struct JsonValue *jv, struct JsonValue *parent)
{
	struct JsonContainer *c = get_container(jv);
	if (c != NULL)
		c->c_parent = parent;
}

/* Allocation context the container was created with, or NULL. */
static inline struct JsonContext *get_context(struct JsonValue *jv)
{
	struct JsonContainer *c = get_container(jv);
	if (!c)
		return NULL;
	return c->c_ctx;
}

/* Lookup tree of a dict value, or NULL for any other type. */
static inline struct CBTree *get_dict_tree(struct JsonValue *jv)
{
	if (!has_type(jv, JSON_DICT))
		return NULL;
	return get_container(jv)->u.c_dict;
}

/* Element chain of a list value, or NULL for any other type. */
static inline struct ValueList *get_list_vlist(struct JsonValue *jv)
{
	if (!has_type(jv, JSON_LIST))
		return NULL;
	return &get_container(jv)->u.c_list;
}
/*
 * Random helpers
 */

/* Copy bytes [src, endptr) into dst; return pointer past the copied bytes.
 * An empty (or inverted) range copies nothing and returns dst unchanged. */
static inline char *plain_copy(char *dst, const char *src, const char *endptr)
{
	size_t n = (src < endptr) ? (size_t)(endptr - src) : 0;
	if (n > 0)
		memcpy(dst, src, n);
	return dst + n;
}
/* Format error message into ctx->errbuf with a line-number prefix.
 * First error wins: once lasterr is set, later calls are ignored. */
_PRINTF(2,0)
static void format_err(struct JsonContext *ctx, const char *errmsg, va_list ap)
{
	char buf[119];	/* leaves room for the "Line #..." prefix in errbuf[128] */
	if (ctx->lasterr)
		return;
	vsnprintf(buf, sizeof(buf), errmsg, ap);
	snprintf(ctx->errbuf, sizeof(ctx->errbuf), "Line #%" PRIi64 ": %s", ctx->linenr, buf);
	ctx->lasterr = ctx->errbuf;
}
/* set message and return false */
_PRINTF(2,3)
static bool err_false(struct JsonContext *ctx, const char *errmsg, ...)
{
	va_list ap;
	va_start(ap, errmsg);
	format_err(ctx, errmsg, ap);
	va_end(ap);
	return false;
}
/* set message and return NULL */
_PRINTF(2,3)
static void *err_null(struct JsonContext *ctx, const char *errmsg, ...)
{
	va_list ap;
	va_start(ap, errmsg);
	format_err(ctx, errmsg, ap);
	va_end(ap);
	return NULL;
}
/* cbtree callback: expose a key value's string bytes for comparisons */
static size_t get_key_data_cb(void *dictptr, void *keyptr, const void **dst_p)
{
	struct JsonValue *kjv = keyptr;
	*dst_p = get_cstring(kjv);
	return kjv->u.v_size;
}
/* Append element at the end of the list's linked chain. */
static void real_list_append(struct JsonValue *list, struct JsonValue *elem)
{
	struct ValueList *vlist = get_list_vlist(list);
	if (!vlist->last)
		vlist->first = elem;
	else
		set_next(vlist->last, elem);
	vlist->last = elem;
	vlist->array = NULL;	/* random-access cache is now stale */
	list->u.v_size++;
}
/* Insert a parsed key value into the dict's lookup tree.
 * The value belonging to the key gets linked later via set_next().
 * Returns false (with error set) on type mismatch, oversized key,
 * or tree insertion failure. */
static bool real_dict_add_key(struct JsonContext *ctx, struct JsonValue *dict, struct JsonValue *key)
{
	struct CBTree *tree;
	tree = get_dict_tree(dict);
	if (!tree)
		return err_false(ctx, "Expect dict");
	if (json_value_size(key) > JSON_MAX_KEY)
		return err_false(ctx, "Too large key");
	if (!cbtree_insert(tree, key))
		return err_false(ctx, "Key insertion failed");
	/* count the key only after it is actually in the tree, so a
	 * failed insert does not leave v_size overcounted */
	dict->u.v_size++;
	return true;
}
/* create basic value struct, link into parse structures */
static struct JsonValue *mk_value(struct JsonContext *ctx, enum JsonValueType type, size_t extra, bool attach)
{
	struct JsonValue *val;
	struct JsonContainer *col = NULL;
	if (!ctx)
		return NULL;
	val = cx_alloc(ctx->pool, sizeof(struct JsonValue) + extra);
	if (!val)
		return err_null(ctx, "No memory");
	/* low pointer bits carry the type tag, so alignment is required */
	if ((uintptr_t)val & TYPE_MASK)
		return err_null(ctx, "Unaligned pointer");
	/* initial value */
	val->v_next_and_type = type;
	val->u.v_int = 0;
	if (type == JSON_DICT || type == JSON_LIST) {
		col = get_container(val);
		col->c_ctx = ctx;
		col->c_parent = NULL;
		if (type == JSON_DICT) {
			col->u.c_dict = cbtree_create(get_key_data_cb, NULL, val, ctx->pool);
			if (!col->u.c_dict)
				return err_null(ctx, "No memory");
		} else {
			memset(&col->u.c_list, 0, sizeof(col->u.c_list));
		}
	}
	/* independent JsonValue?  (json_new_* API, attached later by caller) */
	if (!attach) {
		set_next(val, UNATTACHED);
		return val;
	}
	/* attach to parent */
	if (col)
		col->c_parent = ctx->parent;
	/* attach to previous value */
	if (has_type(ctx->parent, JSON_DICT)) {
		if (ctx->cur_key) {
			/* second half of a pair: link value to its key */
			set_next(ctx->cur_key, val);
			ctx->cur_key = NULL;
		} else {
			/* first half of a pair: remember as pending key */
			ctx->cur_key = val;
		}
	} else if (has_type(ctx->parent, JSON_LIST)) {
		real_list_append(ctx->parent, val);
	} else if (!ctx->top) {
		ctx->top = val;
	} else {
		return err_null(ctx, "Only one top element is allowed");
	}
	return val;
}
static void prepare_array(struct JsonValue *list)
{
struct JsonContainer *c;
struct JsonValue *val;
struct ValueList *vlist;
size_t i;
vlist = get_list_vlist(list);
if (vlist->array)
return;
c = get_container(list);
vlist->array = cx_alloc(c->c_ctx->pool, list->u.v_size * sizeof(struct JsonValue *));
if (!vlist->array)
return;
val = vlist->first;
for (i = 0; i < list->u.v_size && val; i++) {
vlist->array[i] = val;
val = get_next(val);
}
}
/*
 * Parsing code starts
 */

/* Allocate a new container value, attach it, and descend into it. */
static bool open_container(struct JsonContext *ctx, enum JsonValueType type, unsigned int extra)
{
	struct JsonValue *jv = mk_value(ctx, type, extra, true);
	if (!jv)
		return false;
	ctx->parent = jv;
	ctx->cur_key = NULL;
	return true;
}
/* close current container and step back into its parent context */
static enum ParseState close_container(struct JsonContext *ctx, enum ParseState state)
{
	struct JsonContainer *c;
	/* MAPSTATE must have routed the close token to S_PARENT */
	if (state != S_PARENT)
		return (int)err_false(ctx, "close_container bug");	/* 0 = error state */
	c = get_container(ctx->parent);
	if (!c)
		return (int)err_false(ctx, "invalid parent");
	ctx->parent = c->c_parent;
	ctx->cur_key = NULL;
	/* resume in the state matching the enclosing container */
	if (has_type(ctx->parent, JSON_DICT)) {
		return S_DICT_COMMA_OR_CLOSE;
	} else if (has_type(ctx->parent, JSON_LIST)) {
		return S_LIST_COMMA_OR_CLOSE;
	}
	return S_DONE;
}
/* parse 4-char token: "null", "true", or "alse" (tail of "false") */
static bool parse_char4(struct JsonContext *ctx, const char **src_p, const char *end,
			uint32_t t_exp, enum JsonValueType type, bool val)
{
	const char *src;
	uint32_t t_got;
	struct JsonValue *jv;
	src = *src_p;
	if (src + 4 > end)
		return err_false(ctx, "Unexpected end of token");
	/* compare as one 4-byte value instead of strncmp */
	memcpy(&t_got, src, 4);
	if (t_exp != t_got)
		return err_false(ctx, "Invalid token");
	jv = mk_value(ctx, type, 0, true);
	if (!jv)
		return false;
	jv->u.v_bool = val;	/* harmless for JSON_NULL (val is 0 there) */
	*src_p += 4;
	return true;
}
/* parse int or float token at *src_p, advancing it past the number */
static bool parse_number(struct JsonContext *ctx, const char **src_p, const char *end)
{
	const char *start, *src;
	enum JsonValueType type = JSON_INT;
	char *tokend = NULL;
	char buf[NUMBER_BUF];
	size_t len;
	struct JsonValue *jv;
	double v_float = 0;
	int64_t v_int = 0;
	/* scan token chars; '.', 'e' or 'E' flips the type to float */
	start = src = *src_p;
	for (; src < end; src++) {
		if (*src >= '0' && *src <= '9') {
		} else if (*src == '+' || *src == '-') {
		} else if (*src == '.' || *src == 'e' || *src == 'E') {
			type = JSON_FLOAT;
		} else {
			break;
		}
	}
	len = src - start;
	if (len >= NUMBER_BUF)
		goto failed;
	/* copy into NUL-terminated scratch buffer for strto* */
	memcpy(buf, start, len);
	buf[len] = 0;
	/* now parse */
	errno = 0;
	tokend = buf;
	if (type == JSON_FLOAT) {
		v_float = strtod_dot(buf, &tokend);
		if (*tokend != 0 || errno || !isfinite(v_float))
			goto failed;
	} else if (len < 8) {
		/* short integer fits in long even where long is 32-bit */
		v_int = strtol(buf, &tokend, 10);
		if (*tokend != 0 || errno)
			goto failed;
	} else {
		/* keep within the JSON-safe 53-bit integer range */
		v_int = strtoll(buf, &tokend, 10);
		if (*tokend != 0 || errno || v_int < JSON_MININT || v_int > JSON_MAXINT)
			goto failed;
	}
	/* create value struct */
	jv = mk_value(ctx, type, 0, true);
	if (!jv)
		return false;
	if (type == JSON_FLOAT) {
		jv->u.v_float = v_float;
	} else {
		jv->u.v_int = v_int;
	}
	*src_p = src;
	return true;
failed:
	if (!errno)
		errno = EINVAL;
	return err_false(ctx, "Number parse failed");
}
/*
 * String parsing
 */

/* Parse exactly 4 hex digits; returns value (0..0xFFFF) or -1 on error. */
static int parse_hex(const char *s, const char *end)
{
	int val = 0;
	int i;
	if (end - s < 4)
		return -1;
	for (i = 0; i < 4; i++) {
		int d;
		char c = s[i];
		if (c >= '0' && c <= '9')
			d = c - '0';
		else if (c >= 'a' && c <= 'f')
			d = c - 'a' + 10;
		else if (c >= 'A' && c <= 'F')
			d = c - 'A' + 10;
		else
			return -1;
		val = val * 16 + d;
	}
	return val;
}
/* process \uXXXX escapes, merging UTF-16 surrogate pairs into one codepoint */
static bool parse_uescape(struct JsonContext *ctx, char **dst_p, char *dstend,
			  const char **src_p, const char *end)
{
	int c, c2;
	const char *src = *src_p;
	c = parse_hex(src, end);
	/* NOTE(review): 'c <= 0' also rejects \u0000 (valid JSON, but it
	 * would embed NUL in the C string) - presumably deliberate; confirm */
	if (c <= 0)
		return err_false(ctx, "Invalid hex escape");
	src += 4;
	if (c >= 0xD800 && c <= 0xDFFF) {
		/* first surrogate: must be the high half, with a low half following */
		if (c >= 0xDC00)
			return err_false(ctx, "Invalid UTF16 escape");
		if (src + 6 > end)
			return err_false(ctx, "Invalid UTF16 escape");
		/* second surrogate */
		if (src[0] != '\\' || src[1] != 'u')
			return err_false(ctx, "Invalid UTF16 escape");
		c2 = parse_hex(src + 2, end);
		if (c2 < 0xDC00 || c2 > 0xDFFF)
			return err_false(ctx, "Invalid UTF16 escape");
		/* combine surrogate halves into the full codepoint */
		c = 0x10000 + ((c & 0x3FF) << 10) + (c2 & 0x3FF);
		src += 6;
	}
	/* now write char as UTF-8 */
	if (!utf8_put_char(c, dst_p, dstend))
		return err_false(ctx, "Invalid UTF16 escape");
	*src_p = src;
	return true;
}
/* Bytes that terminate the fast scan: quote, backslash, NUL, newline,
 * or any byte with the high bit set (start of a UTF8 sequence). */
#define meta_string(c) (((c) == '"' || (c) == '\\' || (c) == '\0' || \
			 (c) == '\n' || ((c) & 0x80) != 0) ? 1 : 0)
static const uint8_t string_examine_chars[] = INTMAP256_CONST(meta_string);
/* look for string end, validate contents, count newlines */
static bool scan_string(struct JsonContext *ctx, const char *src, const char *end,
			const char **str_end_p, bool *hasesc_p, int64_t *nlines_p)
{
	bool hasesc = false;
	int64_t lines = 0;
	unsigned int n;
	bool check_utf8 = true;
	if (ctx->options & JSON_PARSE_IGNORE_ENCODING)
		check_utf8 = false;
	while (src < end) {
		if (!string_examine_chars[(uint8_t)*src]) {
			/* plain ASCII - skip fast */
			src++;
		} else if (*src == '"') {
			/* string end */
			*hasesc_p = hasesc;
			*str_end_p = src;
			*nlines_p = lines;
			return true;
		} else if (*src == '\\') {
			hasesc = true;
			src++;
			/* step over escaped '\' or '"' so they cannot end the scan */
			if (src < end && (*src == '\\' || *src == '"'))
				src++;
		} else if (*src & 0x80) {
			n = utf8_validate_seq(src, end);
			if (n) {
				src += n;
			} else if (check_utf8) {
				goto badutf;
			} else {
				src++;
			}
		} else if (*src == '\n') {
			lines++;
			src++;
		} else {
			/* embedded NUL byte */
			goto badutf;
		}
	}
	return err_false(ctx, "Unexpected end of string");
badutf:
	return err_false(ctx, "Invalid UTF8 sequence");
}
/* String boundaries are known; copy payload while resolving escapes.
 * Returns new write position in dst, or NULL on invalid escape. */
static char *process_escapes(struct JsonContext *ctx,
			     const char *src, const char *end,
			     char *dst, char *dstend)
{
	const char *esc;
	/* process escapes */
	while (src < end) {
		/* bulk-copy everything up to the next backslash */
		esc = memchr(src, '\\', end - src);
		if (!esc) {
			dst = plain_copy(dst, src, end);
			break;
		}
		dst = plain_copy(dst, src, esc);
		src = esc + 1;
		switch (*src++) {
		case '"': *dst++ = '"'; break;
		case '\\': *dst++ = '\\'; break;
		case '/': *dst++ = '/'; break;
		case 'b': *dst++ = '\b'; break;
		case 'f': *dst++ = '\f'; break;
		case 'n': *dst++ = '\n'; break;
		case 'r': *dst++ = '\r'; break;
		case 't': *dst++ = '\t'; break;
		case 'u':
			if (!parse_uescape(ctx, &dst, dstend, &src, end))
				return NULL;
			break;
		default:
			return err_null(ctx, "Invalid escape code");
		}
	}
	return dst;
}
/* 2-phase string processing: scan & validate first, then copy+unescape */
static bool parse_string(struct JsonContext *ctx, const char **src_p, const char *end)
{
	const char *start, *strend = NULL;
	bool hasesc = false;
	char *dst, *dstend;
	size_t len;
	struct JsonValue *jv;
	int64_t lines = 0;
	/* find string boundaries, validate */
	start = *src_p;
	if (!scan_string(ctx, start, end, &strend, &hasesc, &lines))
		return false;
	/* create value struct; unescaping only shrinks the payload, so raw
	 * length + NUL is a safe upper bound for the allocation */
	len = strend - start;
	jv = mk_value(ctx, JSON_STRING, len + 1, true);
	if (!jv)
		return false;
	dst = get_cstring(jv);
	dstend = dst + len;
	/* copy & process escapes */
	if (hasesc) {
		dst = process_escapes(ctx, start, strend, dst, dstend);
		if (!dst)
			return false;
	} else {
		dst = plain_copy(dst, start, strend);
	}
	*dst = '\0';
	jv->u.v_size = dst - get_cstring(jv);	/* actual length after unescape */
	ctx->linenr += lines;
	*src_p = strend + 1;	/* skip closing quote */
	return true;
}
/*
 * Helpers for relaxed parsing
 */
/* Skip "//" line comment or "/§*...*§/" block comment; *src_p points just
 * past the initial '/'.  Returns false on unterminated block comment. */
static bool skip_comment(struct JsonContext *ctx, const char **src_p, const char *end)
{
	const char *s, *start;
	char c;
	size_t lnr;
	s = start = *src_p;
	if (s >= end)
		return false;
	c = *s++;
	if (c == '/') {
		/* line comment: consume to end of line (or end of input) */
		s = memchr(s, '\n', end - s);
		if (s) {
			ctx->linenr++;
			*src_p = s + 1;
		} else {
			*src_p = end;
		}
		return true;
	} else if (c == '*') {
		/* block comment: find closing star-slash, counting newlines */
		for (lnr = 0; s + 2 <= end; s++) {
			if (s[0] == '*' && s[1] == '/') {
				ctx->linenr += lnr;
				*src_p = s + 2;
				return true;
			} else if (s[0] == '\n') {
				lnr++;
			}
		}
	}
	return false;
}
/* In relaxed mode: detect a trailing comma, i.e. a comma followed
 * (after whitespace) by the matching container close.  Advances
 * *src_p past the whitespace; returns true when the comma should
 * be ignored. */
static bool skip_extra_comma(struct JsonContext *ctx, const char **src_p, const char *end, enum ParseState state)
{
	bool skip = false;
	const char *src = *src_p;
	/* cast to unsigned char: passing a negative char to isspace() is UB */
	while (src < end && isspace((unsigned char)*src)) {
		if (*src == '\n')
			ctx->linenr++;
		src++;
	}
	if (src < end) {
		if (*src == '}') {
			if (state == S_DICT_COMMA_OR_CLOSE || state == S_DICT_KEY_OR_CLOSE)
				skip = true;
		} else if (*src == ']') {
			if (state == S_LIST_COMMA_OR_CLOSE || state == S_LIST_VALUE_OR_CLOSE)
				skip = true;
		}
	}
	*src_p = src;
	return skip;
}
/*
 * Main parser
 */
/* oldstate + token -> newstate; 0 (unset entry) means parse error */
static const unsigned char STATE_STEPS[MAX_STATES][MAX_TOKENS] = {
	[S_INITIAL_VALUE] = {
		[T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE,
		[T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE,
		[T_STRING] = S_DONE,
		[T_OTHER] = S_DONE },
	[S_LIST_VALUE] = {
		[T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE,
		[T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE,
		[T_STRING] = S_LIST_COMMA_OR_CLOSE,
		[T_OTHER] = S_LIST_COMMA_OR_CLOSE },
	[S_LIST_VALUE_OR_CLOSE] = {
		[T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE,
		[T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE,
		[T_STRING] = S_LIST_COMMA_OR_CLOSE,
		[T_OTHER] = S_LIST_COMMA_OR_CLOSE,
		[T_CLOSE_LIST] = S_PARENT },
	[S_LIST_COMMA_OR_CLOSE] = {
		[T_COMMA] = S_LIST_VALUE,
		[T_CLOSE_LIST] = S_PARENT },
	[S_DICT_KEY] = {
		[T_STRING] = S_DICT_COLON },
	[S_DICT_KEY_OR_CLOSE] = {
		[T_STRING] = S_DICT_COLON,
		[T_CLOSE_DICT] = S_PARENT },
	[S_DICT_COLON] = {
		[T_COLON] = S_DICT_VALUE },
	[S_DICT_VALUE] = {
		[T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE,
		[T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE,
		[T_STRING] = S_DICT_COMMA_OR_CLOSE,
		[T_OTHER] = S_DICT_COMMA_OR_CLOSE },
	[S_DICT_COMMA_OR_CLOSE] = {
		[T_COMMA] = S_DICT_KEY,
		[T_CLOSE_DICT] = S_PARENT },
};
/* Advance the state machine; bails out with an error on an invalid
 * transition.  Relies on 'ctx' and 'c' being in scope at the use site. */
#define MAPSTATE(state, tok) do { \
	int newstate = STATE_STEPS[state][tok]; \
	if (!newstate) \
		return err_false(ctx, "Unexpected symbol: '%c'", c); \
	state = newstate; \
} while (0)
/* actual parser: tokenize input and drive the state machine */
static bool parse_tokens(struct JsonContext *ctx, const char *src, const char *end)
{
	char c;
	enum ParseState state = S_INITIAL_VALUE;
	bool relaxed = ctx->options & JSON_PARSE_RELAXED;
	while (src < end) {
		c = *src++;
		switch (c) {
		case '\n':
			ctx->linenr++;
		/* fallthrough */
		case ' ': case '\t': case '\r': case '\f': case '\v':
			/* common case - many spaces */
			while (src < end && *src == ' ') src++;
			break;
		case '"':
			MAPSTATE(state, T_STRING);
			if (!parse_string(ctx, &src, end))
				goto failed;
			break;
		case 'n':
			MAPSTATE(state, T_OTHER);
			src--;	/* parse_char4 wants the full "null" */
			if (!parse_char4(ctx, &src, end, C_NULL, JSON_NULL, 0))
				goto failed;
			continue;
		case 't':
			MAPSTATE(state, T_OTHER);
			src--;	/* parse_char4 wants the full "true" */
			if (!parse_char4(ctx, &src, end, C_TRUE, JSON_BOOL, 1))
				goto failed;
			break;
		case 'f':
			MAPSTATE(state, T_OTHER);
			/* 'f' already consumed; match the remaining "alse" */
			if (!parse_char4(ctx, &src, end, C_ALSE, JSON_BOOL, 0))
				goto failed;
			break;
		case '-':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
			MAPSTATE(state, T_OTHER);
			src--;
			if (!parse_number(ctx, &src, end))
				goto failed;
			break;
		case '[':
			MAPSTATE(state, T_OPEN_LIST);
			if (!open_container(ctx, JSON_LIST, LIST_EXTRA))
				goto failed;
			break;
		case '{':
			MAPSTATE(state, T_OPEN_DICT);
			if (!open_container(ctx, JSON_DICT, DICT_EXTRA))
				goto failed;
			break;
		case ']':
			MAPSTATE(state, T_CLOSE_LIST);
			state = close_container(ctx, state);
			if (!state)
				goto failed;
			break;
		case '}':
			MAPSTATE(state, T_CLOSE_DICT);
			state = close_container(ctx, state);
			if (!state)
				goto failed;
			break;
		case ':':
			MAPSTATE(state, T_COLON);
			/* key string is complete, insert it into the dict tree */
			if (!real_dict_add_key(ctx, ctx->parent, ctx->cur_key))
				goto failed;
			break;
		case ',':
			/* relaxed mode tolerates a trailing comma before close */
			if (relaxed && skip_extra_comma(ctx, &src, end, state))
				continue;
			MAPSTATE(state, T_COMMA);
			break;
		case '/':
			/* relaxed mode tolerates comments */
			if (relaxed && skip_comment(ctx, &src, end))
				continue;
		/* fallthrough */
		default:
			return err_false(ctx, "Invalid symbol: '%c'", c);
		}
	}
	if (state != S_DONE)
		return err_false(ctx, "Container still open");
	return true;
failed:
	return false;
}
/* parser public api */
struct JsonValue *json_parse(struct JsonContext *ctx, const char *json, size_t len)
{
const char *end = json + len;
/* reset parser */
ctx->linenr = 1;
ctx->parent = NULL;
ctx->cur_key = NULL;
ctx->lasterr = NULL;
ctx->top = NULL;
if (!parse_tokens(ctx, json, end))
return NULL;
return ctx->top;
}
/*
 * Render value as JSON string.
 */

/* Write literal "null". */
static bool render_null(struct RenderState *rs, struct JsonValue *jv)
{
	return mbuf_write(rs->dst, "null", 4);
}

/* Write literal "true" or "false". */
static bool render_bool(struct RenderState *rs, struct JsonValue *jv)
{
	if (!jv->u.v_bool)
		return mbuf_write(rs->dst, "false", 5);
	return mbuf_write(rs->dst, "true", 4);
}

/* Write integer in decimal form. */
static bool render_int(struct RenderState *rs, struct JsonValue *jv)
{
	char buf[NUMBER_BUF];
	int len = snprintf(buf, sizeof(buf), "%" PRIi64, jv->u.v_int);
	if (len < 0 || len >= NUMBER_BUF)
		return false;
	return mbuf_write(rs->dst, buf, len);
}
/* Render float; force a ".0" suffix so the value reparses as float. */
static bool render_float(struct RenderState *rs, struct JsonValue *jv)
{
	char buf[NUMBER_BUF + 2];	/* +2 leaves room for appended ".0" */
	int len;
	len = dtostr_dot(buf, NUMBER_BUF, jv->u.v_float);
	if (len < 0 || len >= NUMBER_BUF)
		return false;
	if (!memchr(buf, '.', len) && !memchr(buf, 'e', len)) {
		buf[len++] = '.';
		buf[len++] = '0';
	}
	return mbuf_write(rs->dst, buf, len);
}
/* Write one character in escaped form (leading backslash included). */
static bool escape_char(struct MBuf *dst, unsigned int c)
{
	char ec;
	char buf[10];
	/* start escape */
	if (!mbuf_write_byte(dst, '\\'))
		return false;
	/* escape same char */
	if (c == '"' || c == '\\')
		return mbuf_write_byte(dst, c);
	/* low-ascii mess */
	switch (c) {
	case '\b': ec = 'b'; break;
	case '\f': ec = 'f'; break;
	case '\n': ec = 'n'; break;
	case '\r': ec = 'r'; break;
	case '\t': ec = 't'; break;
	default:
		/* generic \uXXXX form; render_string only passes
		 * c < 0x20 or 0x2028/0x2029, so 4 hex digits suffice */
		snprintf(buf, sizeof(buf), "u%04x", c);
		return mbuf_write(dst, buf, 5);
	}
	return mbuf_write_byte(dst, ec);
}
/* Render string value with quoting and escaping. */
static bool render_string(struct RenderState *rs, struct JsonValue *jv)
{
	const char *s, *last;
	const char *val = get_cstring(jv);
	size_t len = jv->u.v_size;
	const char *end = val + len;
	unsigned int c;
	/* start quote */
	if (!mbuf_write_byte(rs->dst, '"'))
		return false;
	for (s = last = val; s < end; s++) {
		if (*s == '"' || *s == '\\' || (unsigned char)*s < 0x20 ||
		    /* Valid in JSON, but not in JS:
		       \u2028 - Line separator
		       \u2029 - Paragraph separator
		       (lookahead is safe near the end: the payload is
		       NUL-terminated, so the && chain short-circuits
		       at the terminator) */
		    ((unsigned char)s[0] == 0xE2 && (unsigned char)s[1] == 0x80 &&
		     ((unsigned char)s[2] == 0xA8 || (unsigned char)s[2] == 0xA9)))
		{
			/* flush plain bytes collected before the special char */
			if (last < s) {
				if (!mbuf_write(rs->dst, last, s - last))
					return false;
			}
			if ((unsigned char)s[0] == 0xE2) {
				c = 0x2028 + ((unsigned char)s[2] - 0xA8);
				last = s + 3;	/* skip the 3-byte UTF8 sequence */
			} else {
				c = (unsigned char)*s;
				last = s + 1;
			}
			/* output escaped char */
			if (!escape_char(rs->dst, c))
				return false;
		}
	}
	/* flush remaining plain bytes */
	if (last < s) {
		if (!mbuf_write(rs->dst, last, s - last))
			return false;
	}
	/* final quote */
	if (!mbuf_write_byte(rs->dst, '"'))
		return false;
	return true;
}
/*
 * Render complex values
 */

/* Shared state for comma-separated element output. */
struct ElemWriterState {
	struct RenderState *rs;
	char sep;	/* 0 before first element, ',' afterwards */
};

/* Write one list element, preceded by the separator when needed. */
static bool list_elem_writer(void *arg, struct JsonValue *elem)
{
	struct ElemWriterState *st = arg;
	if (st->sep) {
		if (!mbuf_write_byte(st->rs->dst, st->sep))
			return false;
	}
	st->sep = ',';
	return render_any(st->rs, elem);
}

/* Render list as [elem,elem,...]. */
static bool render_list(struct RenderState *rs, struct JsonValue *list)
{
	struct ElemWriterState st = { .rs = rs, .sep = 0 };
	return mbuf_write_byte(rs->dst, '[')
		&& json_list_iter(list, list_elem_writer, &st)
		&& mbuf_write_byte(rs->dst, ']');
}

/* Write one key:value pair, preceded by the separator when needed. */
static bool dict_elem_writer(void *ctx, struct JsonValue *key, struct JsonValue *val)
{
	struct ElemWriterState *st = ctx;
	if (st->sep) {
		if (!mbuf_write_byte(st->rs->dst, st->sep))
			return false;
	}
	st->sep = ',';
	if (!render_any(st->rs, key))
		return false;
	if (!mbuf_write_byte(st->rs->dst, ':'))
		return false;
	return render_any(st->rs, val);
}

/* Render dict as {key:value,...}. */
static bool render_dict(struct RenderState *rs, struct JsonValue *dict)
{
	struct ElemWriterState st = { .rs = rs, .sep = 0 };
	return mbuf_write_byte(rs->dst, '{')
		&& json_dict_iter(dict, dict_elem_writer, &st)
		&& mbuf_write_byte(rs->dst, '}');
}

/* Placeholder for unused type tags in the dispatch table. */
static bool render_invalid(struct RenderState *rs, struct JsonValue *jv)
{
	return false;
}
/*
 * Public api
 */
/* Dispatch to the per-type render function. */
static bool render_any(struct RenderState *rs, struct JsonValue *jv)
{
	/* indexed by enum JsonValueType; entry order must match the enum
	 * declaration in usual/json.h - confirm when editing either */
	static const render_func_t rfunc_map[] = {
		render_invalid, render_null, render_bool, render_int,
		render_float, render_string, render_list, render_dict,
	};
	return rfunc_map[get_type(jv)](rs, jv);
}
/* Serialize a value tree as JSON text into dst. */
bool json_render(struct MBuf *dst, struct JsonValue *jv)
{
	struct RenderState rs;
	rs.dst = dst;
	rs.options = 0;
	return render_any(&rs, jv);
}
/*
 * Examine single value
 */

/* Type tag of a value. */
enum JsonValueType json_value_type(struct JsonValue *jv)
{
	return get_type(jv);
}

/* String length or element count; 0 for simple types or NULL. */
size_t json_value_size(struct JsonValue *jv)
{
	if (!jv)
		return 0;
	switch (get_type(jv)) {
	case JSON_STRING:
	case JSON_LIST:
	case JSON_DICT:
		return jv->u.v_size;
	default:
		return 0;
	}
}

/* Fetch bool payload; fails on type mismatch. */
bool json_value_as_bool(struct JsonValue *jv, bool *dst_p)
{
	bool ok = has_type(jv, JSON_BOOL);
	if (ok)
		*dst_p = jv->u.v_bool;
	return ok;
}

/* Fetch int payload; fails on type mismatch. */
bool json_value_as_int(struct JsonValue *jv, int64_t *dst_p)
{
	bool ok = has_type(jv, JSON_INT);
	if (ok)
		*dst_p = jv->u.v_int;
	return ok;
}

/* Fetch float payload; ints are promoted to double. */
bool json_value_as_float(struct JsonValue *jv, double *dst_p)
{
	if (has_type(jv, JSON_FLOAT)) {
		*dst_p = jv->u.v_float;
		return true;
	}
	if (has_type(jv, JSON_INT)) {
		*dst_p = (double)jv->u.v_int;
		return true;
	}
	return false;
}

/* Fetch string payload and, optionally, its length. */
bool json_value_as_string(struct JsonValue *jv, const char **dst_p, size_t *size_p)
{
	if (!has_type(jv, JSON_STRING))
		return false;
	*dst_p = get_cstring(jv);
	if (size_p != NULL)
		*size_p = jv->u.v_size;
	return true;
}
/*
 * Load value from dict.
 *
 * Returns true and fills *val_p on success.  With req_value=false,
 * a missing key or a JSON null stores NULL in *val_p instead of
 * failing (optional-element semantics).
 */
static int dict_getter(struct JsonValue *dict,
		       const char *key, unsigned int klen,
		       struct JsonValue **val_p,
		       enum JsonValueType req_type, bool req_value)
{
	struct JsonValue *val, *kjv;
	struct CBTree *tree;
	tree = get_dict_tree(dict);
	if (!tree)
		return false;
	kjv = cbtree_lookup(tree, key, klen);
	if (!kjv) {
		if (req_value)
			return false;
		*val_p = NULL;
		return true;
	}
	/* the key's value hangs off the key's next pointer */
	val = get_next(kjv);
	if (!req_value && json_value_is_null(val)) {
		*val_p = NULL;
		return true;
	}
	if (!has_type(val, req_type))
		return false;
	*val_p = val;
	return true;
}
/* Fetch raw value for key, without type checking. */
bool json_dict_get_value(struct JsonValue *dict, const char *key, struct JsonValue **val_p)
{
	struct CBTree *tree = get_dict_tree(dict);
	struct JsonValue *kjv;
	if (!tree)
		return false;
	kjv = cbtree_lookup(tree, key, strlen(key));
	if (!kjv)
		return false;
	*val_p = get_next(kjv);	/* value hangs off the key's next pointer */
	return true;
}
/* True when the key is missing or its value is JSON null. */
bool json_dict_is_null(struct JsonValue *dict, const char *key)
{
	struct JsonValue *val;
	return !json_dict_get_value(dict, key, &val) || has_type(val, JSON_NULL);
}
/*
 * Typed dict lookups; fail when the key is missing or the type differs.
 */
bool json_dict_get_bool(struct JsonValue *dict, const char *key, bool *dst_p)
{
	struct JsonValue *jv;
	return dict_getter(dict, key, strlen(key), &jv, JSON_BOOL, true)
		&& json_value_as_bool(jv, dst_p);
}
bool json_dict_get_int(struct JsonValue *dict, const char *key, int64_t *dst_p)
{
	struct JsonValue *jv;
	return dict_getter(dict, key, strlen(key), &jv, JSON_INT, true)
		&& json_value_as_int(jv, dst_p);
}
bool json_dict_get_float(struct JsonValue *dict, const char *key, double *dst_p)
{
	struct JsonValue *jv;
	return dict_getter(dict, key, strlen(key), &jv, JSON_FLOAT, true)
		&& json_value_as_float(jv, dst_p);
}
bool json_dict_get_string(struct JsonValue *dict, const char *key, const char **dst_p, size_t *len_p)
{
	struct JsonValue *jv;
	return dict_getter(dict, key, strlen(key), &jv, JSON_STRING, true)
		&& json_value_as_string(jv, dst_p, len_p);
}
bool json_dict_get_list(struct JsonValue *dict, const char *key, struct JsonValue **dst_p)
{
	return dict_getter(dict, key, strlen(key), dst_p, JSON_LIST, true);
}
bool json_dict_get_dict(struct JsonValue *dict, const char *key, struct JsonValue **dst_p)
{
	return dict_getter(dict, key, strlen(key), dst_p, JSON_DICT, true);
}
/*
 * Load optional dict element: a missing key or a null value succeeds
 * without touching the destination.
 */
bool json_dict_get_opt_bool(struct JsonValue *dict, const char *key, bool *dst_p)
{
	struct JsonValue *jv;
	if (!dict_getter(dict, key, strlen(key), &jv, JSON_BOOL, false))
		return false;
	return jv == NULL || json_value_as_bool(jv, dst_p);
}
bool json_dict_get_opt_int(struct JsonValue *dict, const char *key, int64_t *dst_p)
{
	struct JsonValue *jv;
	if (!dict_getter(dict, key, strlen(key), &jv, JSON_INT, false))
		return false;
	return jv == NULL || json_value_as_int(jv, dst_p);
}
bool json_dict_get_opt_float(struct JsonValue *dict, const char *key, double *dst_p)
{
	struct JsonValue *jv;
	if (!dict_getter(dict, key, strlen(key), &jv, JSON_FLOAT, false))
		return false;
	return jv == NULL || json_value_as_float(jv, dst_p);
}
bool json_dict_get_opt_string(struct JsonValue *dict, const char *key, const char **dst_p, size_t *len_p)
{
	struct JsonValue *jv;
	if (!dict_getter(dict, key, strlen(key), &jv, JSON_STRING, false))
		return false;
	return jv == NULL || json_value_as_string(jv, dst_p, len_p);
}
bool json_dict_get_opt_list(struct JsonValue *dict, const char *key, struct JsonValue **dst_p)
{
	struct JsonValue *jv;
	if (!dict_getter(dict, key, strlen(key), &jv, JSON_LIST, false))
		return false;
	if (jv != NULL)
		*dst_p = jv;
	return true;
}
bool json_dict_get_opt_dict(struct JsonValue *dict, const char *key, struct JsonValue **dst_p)
{
	struct JsonValue *jv;
	if (!dict_getter(dict, key, strlen(key), &jv, JSON_DICT, false))
		return false;
	if (jv != NULL)
		*dst_p = jv;
	return true;
}
/*
 * Load value from list.
 *
 * Lists longer than 10 elements get a lazily built pointer array
 * for O(1) access; otherwise the linked chain is walked.
 */
bool json_list_get_value(struct JsonValue *list, size_t index, struct JsonValue **val_p)
{
	struct JsonValue *val;
	struct ValueList *vlist;
	size_t i;
	vlist = get_list_vlist(list);
	if (!vlist)
		return false;
	if (index >= list->u.v_size)
		return false;
	if (!vlist->array && list->u.v_size > 10)
		prepare_array(list);
	/* direct fetch via index cache */
	if (vlist->array) {
		*val_p = vlist->array[index];
		return true;
	}
	/* walk the linked chain (cache missing or allocation failed) */
	val = vlist->first;
	for (i = 0; val; i++) {
		if (i == index) {
			*val_p = val;
			return true;
		}
		val = get_next(val);
	}
	return false;
}
/* True when the element is JSON null or the index is out of range. */
bool json_list_is_null(struct JsonValue *list, size_t n)
{
	struct JsonValue *elem;
	return !json_list_get_value(list, n, &elem) || has_type(elem, JSON_NULL);
}
/* Typed list lookups; fail on missing index or type mismatch. */
bool json_list_get_bool(struct JsonValue *list, size_t index, bool *val_p)
{
	struct JsonValue *elem;
	return json_list_get_value(list, index, &elem)
		&& json_value_as_bool(elem, val_p);
}
bool json_list_get_int(struct JsonValue *list, size_t index, int64_t *val_p)
{
	struct JsonValue *elem;
	return json_list_get_value(list, index, &elem)
		&& json_value_as_int(elem, val_p);
}
bool json_list_get_float(struct JsonValue *list, size_t index, double *val_p)
{
	struct JsonValue *elem;
	return json_list_get_value(list, index, &elem)
		&& json_value_as_float(elem, val_p);
}
bool json_list_get_string(struct JsonValue *list, size_t index, const char **val_p, size_t *len_p)
{
	struct JsonValue *elem;
	return json_list_get_value(list, index, &elem)
		&& json_value_as_string(elem, val_p, len_p);
}
bool json_list_get_list(struct JsonValue *list, size_t index, struct JsonValue **val_p)
{
	struct JsonValue *elem;
	if (!json_list_get_value(list, index, &elem) || !has_type(elem, JSON_LIST))
		return false;
	*val_p = elem;
	return true;
}
bool json_list_get_dict(struct JsonValue *list, size_t index, struct JsonValue **val_p)
{
	struct JsonValue *elem;
	if (!json_list_get_value(list, index, &elem) || !has_type(elem, JSON_DICT))
		return false;
	*val_p = elem;
	return true;
}
/*
 * Iterate over list and dict values.
 */
struct DictIterState {
	json_dict_iter_callback_f cb_func;
	void *cb_arg;
};
/* Adapt cbtree's single-pointer walk callback to (key, value) pairs. */
static bool dict_iter_helper(void *arg, void *jv)
{
	struct DictIterState *st = arg;
	struct JsonValue *key = jv;
	return st->cb_func(st->cb_arg, key, get_next(key));
}
/* Call cb_func for each key/value pair, in tree (byte-wise key) order. */
bool json_dict_iter(struct JsonValue *dict, json_dict_iter_callback_f cb_func, void *cb_arg)
{
	struct DictIterState st;
	struct CBTree *tree = get_dict_tree(dict);
	if (!tree)
		return false;
	st.cb_func = cb_func;
	st.cb_arg = cb_arg;
	return cbtree_walk(tree, dict_iter_helper, &st);
}
/* Call cb_func for each element in insertion order; stop on false. */
bool json_list_iter(struct JsonValue *list, json_list_iter_callback_f cb_func, void *cb_arg)
{
	struct JsonValue *elem;
	struct ValueList *vlist = get_list_vlist(list);
	if (!vlist)
		return false;
	for (elem = vlist->first; elem != NULL; elem = get_next(elem)) {
		if (!cb_func(cb_arg, elem))
			return false;
	}
	return true;
}
/*
* Create new values.
*/
/* Allocate a JSON null value. */
struct JsonValue *json_new_null(struct JsonContext *ctx)
{
	return mk_value(ctx, JSON_NULL, 0, false);
}

/* Allocate a JSON boolean value. */
struct JsonValue *json_new_bool(struct JsonContext *ctx, bool val)
{
	struct JsonValue *bv = mk_value(ctx, JSON_BOOL, 0, false);

	if (bv)
		bv->u.v_bool = val;
	return bv;
}

/* Allocate a JSON integer.  Values outside the exactly-representable
 * double range [JSON_MININT, JSON_MAXINT] are rejected with ERANGE. */
struct JsonValue *json_new_int(struct JsonContext *ctx, int64_t val)
{
	struct JsonValue *iv;

	if (val >= JSON_MININT && val <= JSON_MAXINT) {
		iv = mk_value(ctx, JSON_INT, 0, false);
		if (iv)
			iv->u.v_int = val;
		return iv;
	}
	errno = ERANGE;
	return NULL;
}
struct JsonValue *json_new_float(struct JsonContext *ctx, double val)
{
struct JsonValue *jv;
/* check if value survives JSON roundtrip */
if (!isfinite(val))
return false;
jv = mk_value(ctx, JSON_FLOAT, 0, false);
if (jv)
jv->u.v_float = val;
return jv;
}
/* Allocate a JSON string from a NUL-terminated C string.
 * The bytes must be valid UTF-8; the string is copied, including
 * the terminating NUL, into the value's trailing storage. */
struct JsonValue *json_new_string(struct JsonContext *ctx, const char *val)
{
	size_t len = strlen(val);
	struct JsonValue *sv;

	if (!utf8_validate_string(val, val + len))
		return NULL;
	sv = mk_value(ctx, JSON_STRING, len + 1, false);
	if (!sv)
		return NULL;
	memcpy(get_cstring(sv), val, len + 1);
	sv->u.v_size = len;
	return sv;
}

/* Allocate an empty JSON list. */
struct JsonValue *json_new_list(struct JsonContext *ctx)
{
	return mk_value(ctx, JSON_LIST, LIST_EXTRA, false);
}

/* Allocate an empty JSON dict. */
struct JsonValue *json_new_dict(struct JsonContext *ctx)
{
	return mk_value(ctx, JSON_DICT, DICT_EXTRA, false);
}
/*
* Add to containers
*/
/* Attach 'val' to the end of 'list'.  Fails when val is NULL, list is
 * not a list, or val already belongs to some container. */
bool json_list_append(struct JsonValue *list, struct JsonValue *val)
{
	if (!val || !has_type(list, JSON_LIST) || !is_unattached(val))
		return false;
	set_parent(val, list);
	set_next(val, NULL);
	real_list_append(list, val);
	return true;
}

/* Convenience wrappers: construct a value in the list's context and
 * append it in one step. */
bool json_list_append_null(struct JsonValue *list)
{
	return json_list_append(list, json_new_null(get_context(list)));
}

bool json_list_append_bool(struct JsonValue *list, bool val)
{
	return json_list_append(list, json_new_bool(get_context(list), val));
}

bool json_list_append_int(struct JsonValue *list, int64_t val)
{
	return json_list_append(list, json_new_int(get_context(list), val));
}

bool json_list_append_float(struct JsonValue *list, double val)
{
	return json_list_append(list, json_new_float(get_context(list), val));
}

bool json_list_append_string(struct JsonValue *list, const char *val)
{
	return json_list_append(list, json_new_string(get_context(list), val));
}
/* Insert (key, val) into 'dict'.  The key string is copied into a new
 * JSON string value; the key value's 'next' link points at the payload.
 * Fails when key/val is NULL, dict is not a dict, or val is already
 * attached elsewhere. */
bool json_dict_put(struct JsonValue *dict, const char *key, struct JsonValue *val)
{
	struct JsonContainer *container;
	struct JsonValue *keyval;

	if (!key || !val || !has_type(dict, JSON_DICT) || !is_unattached(val))
		return false;

	container = get_container(dict);
	keyval = json_new_string(container->c_ctx, key);
	if (!keyval || !real_dict_add_key(container->c_ctx, dict, keyval))
		return false;

	set_next(keyval, val);
	set_next(val, NULL);
	set_parent(val, dict);
	return true;
}

/* Convenience wrappers: construct a value in the dict's context and
 * insert it under 'key' in one step. */
bool json_dict_put_null(struct JsonValue *dict, const char *key)
{
	return json_dict_put(dict, key, json_new_null(get_context(dict)));
}

bool json_dict_put_bool(struct JsonValue *dict, const char *key, bool val)
{
	return json_dict_put(dict, key, json_new_bool(get_context(dict), val));
}

bool json_dict_put_int(struct JsonValue *dict, const char *key, int64_t val)
{
	return json_dict_put(dict, key, json_new_int(get_context(dict), val));
}

bool json_dict_put_float(struct JsonValue *dict, const char *key, double val)
{
	return json_dict_put(dict, key, json_new_float(get_context(dict), val));
}

bool json_dict_put_string(struct JsonValue *dict, const char *key, const char *val)
{
	return json_dict_put(dict, key, json_new_string(get_context(dict), val));
}
/*
* Main context management
*/
/* Create a JSON context backed by a fresh memory pool allocated from
 * 'cx'.  Everything parsed/built in the context lives in that pool and
 * is freed as one unit by json_free_context(). */
struct JsonContext *json_new_context(const void *cx, size_t initial_mem)
{
	struct JsonContext *ctx;
	CxMem *pool = cx_new_pool(cx, initial_mem, 8);

	if (!pool)
		return NULL;
	ctx = cx_alloc0(pool, sizeof(*ctx));
	if (ctx) {
		ctx->pool = pool;
		return ctx;
	}
	cx_destroy(pool);
	return NULL;
}

/* Release the context and all values allocated in it. */
void json_free_context(struct JsonContext *ctx)
{
	CxMem *pool;

	if (!ctx)
		return;
	pool = ctx->pool;
	/* wipe the struct first: it lives inside the pool being destroyed */
	memset(ctx, 0, sizeof(*ctx));
	cx_destroy(pool);
}

/* Return the last error message recorded on the context. */
const char *json_strerror(struct JsonContext *ctx)
{
	return ctx->lasterr;
}

/* Replace the context's option flags. */
void json_set_options(struct JsonContext *ctx, unsigned int options)
{
	ctx->options = options;
}
| markokr/libusual | usual/json.c | C | isc | 38,477 |
/* Copyright information is at end of file */
#include "xmlrpc_config.h"
#include <stddef.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "stdargx.h"
#include "xmlrpc-c/base.h"
#include "xmlrpc-c/base_int.h"
#include "xmlrpc-c/string_int.h"
/* Build a string value ('s' specifier) from the next vararg.
 * An optional '#' suffix in the format string means an explicit
 * length follows as a second argument (string may contain NULs);
 * otherwise the length comes from strlen(). */
static void
getString(xmlrpc_env *const envP,
          const char **const formatP,
          va_listx *const argsP,
          xmlrpc_value **const valPP) {

    const char *contents;
    size_t length;

    contents = (const char *) va_arg(argsP->v, char*);

    if (**formatP == '#') {
        ++(*formatP);
        length = (size_t) va_arg(argsP->v, size_t);
    } else
        length = strlen(contents);

    *valPP = xmlrpc_string_new_lp(envP, length, contents);
}
/* Build a string value from a wide-character ('w' specifier) vararg,
   with the same optional '#' explicit-length convention as getString().

   NOTE(review): when HAVE_UNICODE_WCHAR is not set, this function is an
   empty stub: it consumes no varargs and leaves *valPP unset, so the
   caller would proceed with an indeterminate pointer -- verify callers
   never pass 'w' in such builds. */
static void
getWideString(xmlrpc_env *const envP ATTR_UNUSED,
              const char **const formatP ATTR_UNUSED,
              va_listx *const argsP ATTR_UNUSED,
              xmlrpc_value **const valPP ATTR_UNUSED) {
#if HAVE_UNICODE_WCHAR
    wchar_t *wcs;
    size_t len;

    wcs = (wchar_t*) va_arg(argsP->v, wchar_t*);
    if (**formatP == '#') {
        (*formatP)++;
        len = (size_t) va_arg(argsP->v, size_t);
    } else
        len = wcslen(wcs);

    *valPP = xmlrpc_string_w_new_lp(envP, len, wcs);

#endif /* HAVE_UNICODE_WCHAR */
}
/* Build a base64 (binary blob) value ('6' specifier).  Consumes two
 * varargs: the byte pointer and its length. */
static void
getBase64(xmlrpc_env *const envP,
          va_listx *const argsP,
          xmlrpc_value **const valPP) {

    unsigned char *bytes;
    size_t byteCount;

    bytes = (unsigned char *) va_arg(argsP->v, unsigned char*);
    byteCount = (size_t) va_arg(argsP->v, size_t);

    *valPP = xmlrpc_base64_new(envP, byteCount, bytes);
}
static void
getValue(xmlrpc_env *const envP,
const char **const format,
va_listx *const argsP,
xmlrpc_value **const valPP);
/* Build an array value from format specifiers up to 'delimiter'
   (the matching ')').

   Fix: previously, if xmlrpc_array_new() failed (fault set, NULL
   returned), the loop was skipped but the epilogue still executed
   xmlrpc_DECREF(arrayP) on a NULL pointer.  Guard the whole body with
   a fault check, mirroring getStruct().  On any fault, *arrayPP is not
   meaningful; callers must test envP->fault_occurred first. */
static void
getArray(xmlrpc_env *const envP,
         const char **const formatP,
         char const delimiter,
         va_listx *const argsP,
         xmlrpc_value **const arrayPP) {

    xmlrpc_value *arrayP;

    arrayP = xmlrpc_array_new(envP);
    if (!envP->fault_occurred) {
        /* Add items to the array until we hit our delimiter. */
        while (**formatP != delimiter && !envP->fault_occurred) {
            xmlrpc_value *itemP;

            if (**formatP == '\0')
                xmlrpc_env_set_fault(
                    envP, XMLRPC_INTERNAL_ERROR,
                    "format string ended before closing ')'.");
            else {
                getValue(envP, formatP, argsP, &itemP);
                if (!envP->fault_occurred) {
                    xmlrpc_array_append_item(envP, arrayP, itemP);
                    xmlrpc_DECREF(itemP);
                }
            }
        }
        if (envP->fault_occurred)
            xmlrpc_DECREF(arrayP);
    }
    *arrayPP = arrayP;
}
/* Parse one "key:value" pair from the format string.  On success the
 * caller owns one reference each to *keyPP and *valuePP.  On fault,
 * any key reference already taken is released here and neither output
 * is meaningful. */
static void
getStructMember(xmlrpc_env *const envP,
                const char **const formatP,
                va_listx *const argsP,
                xmlrpc_value **const keyPP,
                xmlrpc_value **const valuePP) {

    /* Get the key */
    getValue(envP, formatP, argsP, keyPP);
    if (envP->fault_occurred)
        return;

    if (**formatP != ':') {
        xmlrpc_env_set_fault(
            envP, XMLRPC_INTERNAL_ERROR,
            "format string does not have ':' after a "
            "structure member key.");
        xmlrpc_DECREF(*keyPP);
    } else {
        /* Skip over colon that separates key from value */
        ++(*formatP);

        /* Get the value */
        getValue(envP, formatP, argsP, valuePP);
        if (envP->fault_occurred)
            xmlrpc_DECREF(*keyPP);
    }
}
/* Build a struct value from "key:value[,key:value...]" format
   specifiers up to 'delimiter' (the matching '}').

   Reference counting: xmlrpc_struct_set_value_v takes its own
   references to keyP/valueP, so the pair returned by getStructMember
   is DECREF'd unconditionally after the insert attempt.  On any fault
   the partially built struct is released; *structPP is then not
   meaningful and callers must check envP->fault_occurred first. */
static void
getStruct(xmlrpc_env *const envP,
          const char **const formatP,
          char const delimiter,
          va_listx *const argsP,
          xmlrpc_value **const structPP) {

    xmlrpc_value *structP;

    structP = xmlrpc_struct_new(envP);
    if (!envP->fault_occurred) {
        while (**formatP != delimiter && !envP->fault_occurred) {
            xmlrpc_value *keyP;
            xmlrpc_value *valueP;

            getStructMember(envP, formatP, argsP, &keyP, &valueP);

            if (!envP->fault_occurred) {
                /* A member must be followed by ',' (more members)
                   or the closing delimiter. */
                if (**formatP == ',')
                    (*formatP)++; /* Skip over the comma */
                else if (**formatP == delimiter) {
                    /* End of the line */
                } else
                    xmlrpc_env_set_fault(
                        envP, XMLRPC_INTERNAL_ERROR,
                        "format string does not have ',' or ')' after "
                        "a structure member");

                if (!envP->fault_occurred)
                    /* Add the new member to the struct. */
                    xmlrpc_struct_set_value_v(envP, structP, keyP, valueP);

                xmlrpc_DECREF(valueP);
                xmlrpc_DECREF(keyP);
            }
        }
        if (envP->fault_occurred)
            xmlrpc_DECREF(structP);
    }
    *structPP = structP;
}
/* 'A' specifier: pass through a caller-supplied xmlrpc_value that must
 * already be an array.  Takes a new reference on success; faults
 * otherwise.  *valPP is set either way, matching original behavior. */
static void
mkArrayFromVal(xmlrpc_env *const envP,
               xmlrpc_value *const value,
               xmlrpc_value **const valPP) {

    if (xmlrpc_value_type(value) == XMLRPC_TYPE_ARRAY)
        xmlrpc_INCREF(value);
    else
        xmlrpc_env_set_fault(envP, XMLRPC_INTERNAL_ERROR,
                             "Array format ('A'), non-array xmlrpc_value");

    *valPP = value;
}
/* 'S' specifier: pass through a caller-supplied xmlrpc_value that must
 * already be a struct.  Takes a new reference on success; faults
 * otherwise.  *valPP is set either way, matching original behavior. */
static void
mkStructFromVal(xmlrpc_env *const envP,
                xmlrpc_value *const value,
                xmlrpc_value **const valPP) {

    if (xmlrpc_value_type(value) == XMLRPC_TYPE_STRUCT)
        xmlrpc_INCREF(value);
    else
        xmlrpc_env_set_fault(envP, XMLRPC_INTERNAL_ERROR,
                             "Struct format ('S'), non-struct xmlrpc_value");

    *valPP = value;
}
static void
getValue(xmlrpc_env *const envP,
         const char **const formatP,
         va_listx *const argsP,
         xmlrpc_value **const valPP) {
/*----------------------------------------------------------------------------
   Get the next value from the list.  *formatP points to the specifier
   for the next value in the format string (i.e. to the type code
   character) and we move *formatP past the whole specifier for the
   next value.  We read the required arguments from 'argsP'.  We return
   the value as *valPP with a reference to it.

   For example, if *formatP points to the "i" in the string "sis",
   we read one argument from 'argsP' and return as *valP an integer whose
   value is the argument we read.  We advance *formatP to point to the
   last 's' and advance 'argsP' to point to the argument that belongs to
   that 's'.
-----------------------------------------------------------------------------*/
    char const formatChar = *(*formatP)++;

    switch (formatChar) {
    case 'i':
        /* 32-bit integer */
        *valPP =
            xmlrpc_int_new(envP, (xmlrpc_int32) va_arg(argsP->v,
                                                       xmlrpc_int32));
        break;

    case 'b':
        /* boolean */
        *valPP =
            xmlrpc_bool_new(envP, (xmlrpc_bool) va_arg(argsP->v,
                                                       xmlrpc_bool));
        break;

    case 'd':
        /* double-precision float */
        *valPP =
            xmlrpc_double_new(envP, (double) va_arg(argsP->v, double));
        break;

    case 's':
        /* C string; 's#' takes an explicit length argument */
        getString(envP, formatP, argsP, valPP);
        break;

    case 'w':
        /* wide-character string (only if built with wchar support) */
        getWideString(envP, formatP, argsP, valPP);
        break;

    case 't':
        /* datetime from a time_t */
        *valPP = xmlrpc_datetime_new_sec(envP, va_arg(argsP->v, time_t));
        break;

    case '8':
        /* datetime from an ISO 8601 string */
        *valPP = xmlrpc_datetime_new_str(envP, va_arg(argsP->v, char*));
        break;

    case '6':
        /* base64 binary blob: pointer + length arguments */
        getBase64(envP, argsP, valPP);
        break;

    case 'n':
        /* nil -- consumes no argument */
        *valPP =
            xmlrpc_nil_new(envP);
        break;

    case 'I':
        /* 64-bit integer */
        *valPP =
            xmlrpc_i8_new(envP, (xmlrpc_int64) va_arg(argsP->v,
                                                      xmlrpc_int64));
        break;

    case 'p':
        /* C pointer (local extension; not transportable over the wire) */
        *valPP =
            xmlrpc_cptr_new(envP, (void *) va_arg(argsP->v, void*));
        break;

    case 'A':
        /* pre-built array value passed through (reference added) */
        mkArrayFromVal(envP,
                       (xmlrpc_value *) va_arg(argsP->v, xmlrpc_value*),
                       valPP);
        break;

    case 'S':
        /* pre-built struct value passed through (reference added) */
        mkStructFromVal(envP,
                        (xmlrpc_value *) va_arg(argsP->v, xmlrpc_value*),
                        valPP);
        break;

    case 'V':
        /* arbitrary pre-built value passed through (reference added) */
        *valPP = (xmlrpc_value *) va_arg(argsP->v, xmlrpc_value*);
        xmlrpc_INCREF(*valPP);
        break;

    case '(':
        /* array built from following specifiers, up to ')' */
        getArray(envP, formatP, ')', argsP, valPP);
        if (!envP->fault_occurred) {
            XMLRPC_ASSERT(**formatP == ')');
            (*formatP)++;  /* Skip over closing parenthesis */
        }
        break;

    case '{':
        /* struct built from following key:value specifiers, up to '}' */
        getStruct(envP, formatP, '}', argsP, valPP);
        if (!envP->fault_occurred) {
            XMLRPC_ASSERT(**formatP == '}');
            (*formatP)++;  /* Skip over closing brace */
        }
        break;

    default: {
        const char *const badCharacter = xmlrpc_makePrintableChar(
            formatChar);
        xmlrpc_env_set_fault_formatted(
            envP, XMLRPC_INTERNAL_ERROR,
            "Unexpected character '%s' in format string", badCharacter);
        xmlrpc_strfree(badCharacter);
    }
    }
}
/* Build one xmlrpc_value described by 'format', reading the needed
 * arguments from 'args'.  Returns the value as *valPP (caller owns one
 * reference) and points *tailP at the first unconsumed format
 * character.  On fault neither output is set. */
void
xmlrpc_build_value_va(xmlrpc_env *const envP,
                      const char *const format,
                      va_list const args,
                      xmlrpc_value **const valPP,
                      const char **const tailP) {

    XMLRPC_ASSERT_ENV_OK(envP);
    XMLRPC_ASSERT(format != NULL);

    if (format[0] == '\0')
        xmlrpc_faultf(envP, "Format string is empty.");
    else {
        va_listx currentArgs;
        const char *cursor;

        init_va_listx(&currentArgs, args);
        cursor = format;

        getValue(envP, &cursor, &currentArgs, valPP);

        if (!envP->fault_occurred)
            XMLRPC_ASSERT_VALUE_OK(*valPP);

        *tailP = cursor;
    }
}
/* Build one xmlrpc_value from a printf-like format string and varargs.
   The format must describe exactly one value; trailing characters are
   a fault.  On fault the return value is NULL and must not be used.

   Fix: 'retval' and 'suffix' were uninitialized; when
   xmlrpc_build_value_va() faults without setting its outputs (e.g.
   empty format string), the function returned an indeterminate
   pointer.  Initialize both to NULL. */
xmlrpc_value *
xmlrpc_build_value(xmlrpc_env *const envP,
                   const char *const format,
                   ...) {

    va_list args;
    xmlrpc_value *retval = NULL;
    const char *suffix = NULL;

    va_start(args, format);
    xmlrpc_build_value_va(envP, format, args, &retval, &suffix);
    va_end(args);

    if (!envP->fault_occurred) {
        if (*suffix != '\0')
            xmlrpc_faultf(envP, "Junk after the format specifier: '%s'.  "
                          "The format string must describe exactly "
                          "one XML-RPC value "
                          "(but it might be a compound value "
                          "such as an array)",
                          suffix);

        if (envP->fault_occurred)
            xmlrpc_DECREF(retval);
    }
    return retval;
}
/* Copyright (C) 2001 by First Peer, Inc. All rights reserved.
** Copyright (C) 2001 by Eric Kidd. All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
** SUCH DAMAGE. */
| arssivka/naomech | xmlrpc-c/src/xmlrpc_build.c | C | isc | 12,744 |
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/endian.h>
#include <sysexits.h>
#include <mpg123.h>
#include "audio.h"
#include "mp3.h"
/* Decoder state for one open MP3 file. */
struct mp3 {
	mpg123_handle *h;	/* mpg123 decoder handle; NULL until created */
	int fd;			/* fd of the source file, decoded via mpg123_open_fd() */
	int first;		/* nonzero until output params are pushed to the sink */
	int rate;		/* sample rate reported by mpg123_getformat() */
	int channels;		/* 1 (mono) or 2 (stereo) */
	int endian;		/* 1 if samples are little-endian (host byte order) */
	int octets;		/* bytes per sample, 1..4 */
	int sign;		/* nonzero if sample encoding is signed */
};
/* Open 'file', verify it looks like an MP3 (MPEG frame sync or ID3
   tag), and set up an mpg123 decoder for it.  Returns a decoder state
   object, or NULL on any failure (nothing is leaked).

   Fix: when mpg123_init() failed, the code did 'return NULL' directly,
   leaking both the malloc'd state and the open file descriptor; it now
   takes the common error path. */
struct mp3 *
mp3_open(const char *file) {
	struct mp3 *m = NULL;
	char magic[3];
	long rate;
	int chan;
	int enc;

	if ((m = malloc(sizeof(struct mp3))) == NULL)
		goto err;
	m->h = NULL;
	if ((m->fd = open(file, O_RDONLY)) < 0)
		goto err;

	/* Sniff the first three bytes, then rewind for the decoder. */
	if (read(m->fd, magic, 3) != 3)
		goto err;
	if (strncmp(magic, "\xFF\xFB", 2) != 0 &&
	    strncmp(magic, "ID3", 3) != 0)
		goto err;
	if (lseek(m->fd, -3, SEEK_CUR) == -1)
		goto err;

	if (mpg123_init() != MPG123_OK)
		goto err;
	if ((m->h = mpg123_new(NULL, NULL)) == NULL ||
	    mpg123_param(m->h, MPG123_ADD_FLAGS, MPG123_QUIET, 0)
	    != MPG123_OK || mpg123_open_fd(m->h, m->fd) != MPG123_OK)
		goto err;
	if (mpg123_getformat(m->h, &rate, &chan, &enc)
	    != MPG123_OK || rate > (int)(~0U >> 1)) {
		mpg123_close(m->h);
		goto err;
	}

	m->first = 1;
	/* Does mpg123 always output in host byte-order? */
	m->endian = BYTE_ORDER == LITTLE_ENDIAN;
	m->rate = rate;
	m->sign = !!(enc & MPG123_ENC_SIGNED);

	if (chan & MPG123_STEREO)
		m->channels = 2;
	else /* MPG123_MONO */
		m->channels = 1;

	/* Only integer PCM encodings are supported. */
	if (enc & MPG123_ENC_FLOAT) {
		mpg123_close(m->h);
		goto err;
	}
	if (enc & MPG123_ENC_32)
		m->octets = 4;
	else if (enc & MPG123_ENC_24)
		m->octets = 3;
	else if (enc & MPG123_ENC_16)
		m->octets = 2;
	else /* MPG123_ENC_8 */
		m->octets = 1;

	return m;
err:
	if (m != NULL) {
		if (m->h != NULL)
			mpg123_delete(m->h);
		if (m->fd >= 0)
			close(m->fd);
		free(m);
	}
	mpg123_exit();
	return NULL;
}
/* Decode up to 'size' bytes into 'buf' and write them to the audio
 * sink.  On the first call, the sink is configured from the decoded
 * stream's parameters.  Returns EX_OK on success, 1 at end of stream,
 * or a sysexits code on error. */
int
mp3_copy(struct mp3 *m, void *buf, size_t size, struct audio *out) {
	size_t n;

	if (m == NULL || buf == NULL || size == 0 || out == NULL)
		return EX_USAGE;

	if (m->first) { /* setup audio output */
		m->first = 0;
		a_setrate(out, m->rate);
		a_setchan(out, m->channels);
		a_setend(out, m->endian);
		a_setbits(out, m->octets << 3);
		a_setsign(out, m->sign);
	}

	if (mpg123_read(m->h, buf, size, &n) != MPG123_OK)
		return EX_SOFTWARE;
	if (n == 0)
		return 1;	/* end of stream */

	/* Interrupted/blocked writes are not treated as fatal. */
	if (a_write(out, buf, n) != n && errno != EINTR
	    && errno != EAGAIN)
		return EX_IOERR;
	return EX_OK;
}
/* Tear down a decoder state created by mp3_open(): close the source
   fd, release the mpg123 handle, and free the state.  NULL is a no-op.
   NOTE(review): the fd is closed before mpg123_close(); with
   mpg123_open_fd() the library does not own the fd, but confirm no
   teardown path reads from it. */
void
mp3_close(struct mp3 *m) {
	if (m == NULL)
		return;
	if (m->fd >= 0)
		close(m->fd);
	if (m->h != NULL) {
		mpg123_close(m->h);
		mpg123_delete(m->h);
	}
	mpg123_exit();
	free(m);
}
/*
* Copyright 2005-2019 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
/**
* The Whirlpool hashing function.
*
* See
* P.S.L.M. Barreto, V. Rijmen,
* ``The Whirlpool hashing function,''
* NESSIE submission, 2000 (tweaked version, 2001),
* <https://www.cosic.esat.kuleuven.ac.be/nessie/workshop/submissions/whirlpool.zip>
*
* Based on "@version 3.0 (2003.03.12)" by Paulo S.L.M. Barreto and
* Vincent Rijmen. Lookup "reference implementations" on
* <http://planeta.terra.com.br/informatica/paulobarreto/>
*
* =============================================================================
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "wp_locl.h"
#include <string.h>
typedef unsigned char u8;
#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32)
typedef unsigned __int64 u64;
#elif defined(__arch64__)
typedef unsigned long u64;
#else
typedef unsigned long long u64;
#endif
#define ROUNDS 10
#define STRICT_ALIGNMENT
#if !defined(PEDANTIC) && (defined(__i386) || defined(__i386__) || \
defined(__x86_64) || defined(__x86_64__) || \
defined(_M_IX86) || defined(_M_AMD64) || \
defined(_M_X64))
/*
 * Well, formally there are a couple of other architectures which permit
 * unaligned loads, specifically those not crossing cache lines, such as
 * IA-64 and PowerPC...
 */
# undef STRICT_ALIGNMENT
#endif
#undef SMALL_REGISTER_BANK
#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
# define SMALL_REGISTER_BANK
# if defined(WHIRLPOOL_ASM)
# ifndef OPENSSL_SMALL_FOOTPRINT
/*
* it appears that for elder non-MMX
* CPUs this is actually faster!
*/
# define OPENSSL_SMALL_FOOTPRINT
# endif
# define GO_FOR_MMX(ctx,inp,num) do { \
extern unsigned long OPENSSL_ia32cap_P[]; \
void whirlpool_block_mmx(void *,const void *,size_t); \
if (!(OPENSSL_ia32cap_P[0] & (1<<23))) break; \
whirlpool_block_mmx(ctx->H.c,inp,num); return; \
} while (0)
# endif
#endif
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER)
# if defined(_WIN64) /* applies to both IA-64 and AMD64 */
# include <stdlib.h>
# pragma intrinsic(_rotl64)
# define ROTATE(a,n) _rotl64((a),n)
# endif
# elif defined(__GNUC__) && __GNUC__>=2
# if defined(__x86_64) || defined(__x86_64__)
# if defined(L_ENDIAN)
# define ROTATE(a,n) ({ u64 ret; asm ("rolq %1,%0" \
: "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
# elif defined(B_ENDIAN)
/*
* Most will argue that x86_64 is always little-endian. Well, yes, but
* then we have stratus.com who has modified gcc to "emulate"
* big-endian on x86. Is there evidence that they [or somebody else]
* won't do same for x86_64? Naturally no. And this line is waiting
* ready for that brave soul:-)
*/
# define ROTATE(a,n) ({ u64 ret; asm ("rorq %1,%0" \
: "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
# endif
# elif defined(__ia64) || defined(__ia64__)
# if defined(L_ENDIAN)
# define ROTATE(a,n) ({ u64 ret; asm ("shrp %0=%1,%1,%2" \
: "=r"(ret) : "r"(a),"M"(64-(n))); ret; })
# elif defined(B_ENDIAN)
# define ROTATE(a,n) ({ u64 ret; asm ("shrp %0=%1,%1,%2" \
: "=r"(ret) : "r"(a),"M"(n)); ret; })
# endif
# endif
# endif
#endif
#if defined(OPENSSL_SMALL_FOOTPRINT)
# if !defined(ROTATE)
# if defined(L_ENDIAN) /* little-endians have to rotate left */
# define ROTATE(i,n) ((i)<<(n) ^ (i)>>(64-n))
# elif defined(B_ENDIAN) /* big-endians have to rotate right */
# define ROTATE(i,n) ((i)>>(n) ^ (i)<<(64-n))
# endif
# endif
# if defined(ROTATE) && !defined(STRICT_ALIGNMENT)
# define STRICT_ALIGNMENT /* ensure smallest table size */
# endif
#endif
/*
* Table size depends on STRICT_ALIGNMENT and whether or not endian-
* specific ROTATE macro is defined. If STRICT_ALIGNMENT is not
* defined, which is normally the case on x86[_64] CPUs, the table is
* 4KB large unconditionally. Otherwise if ROTATE is defined, the
* table is 2KB large, and otherwise - 16KB. 2KB table requires a
* whole bunch of additional rotations, but I'm willing to "trade,"
* because 16KB table certainly trashes L1 cache. I wish all CPUs
* could handle unaligned load as 4KB table doesn't trash the cache,
* nor does it require additional rotations.
*/
/*
* Note that every Cn macro expands as two loads: one byte load and
* one quadword load. One can argue that that many single-byte loads
* is too excessive, as one could load a quadword and "milk" it for
* eight 8-bit values instead. Well, yes, but in order to do so *and*
* avoid excessive loads you have to accommodate a handful of 64-bit
* values in the register bank and issue a bunch of shifts and mask.
* It's a tradeoff: loads vs. shift and mask in big register bank[!].
* On most CPUs eight single-byte loads are faster and I let other
* ones to depend on smart compiler to fold byte loads if beneficial.
* Hand-coded assembler would be another alternative:-)
*/
#ifdef STRICT_ALIGNMENT
# if defined(ROTATE)
# define N 1
# define LL(c0,c1,c2,c3,c4,c5,c6,c7) c0,c1,c2,c3,c4,c5,c6,c7
# define C0(K,i) (Cx.q[K.c[(i)*8+0]])
# define C1(K,i) ROTATE(Cx.q[K.c[(i)*8+1]],8)
# define C2(K,i) ROTATE(Cx.q[K.c[(i)*8+2]],16)
# define C3(K,i) ROTATE(Cx.q[K.c[(i)*8+3]],24)
# define C4(K,i) ROTATE(Cx.q[K.c[(i)*8+4]],32)
# define C5(K,i) ROTATE(Cx.q[K.c[(i)*8+5]],40)
# define C6(K,i) ROTATE(Cx.q[K.c[(i)*8+6]],48)
# define C7(K,i) ROTATE(Cx.q[K.c[(i)*8+7]],56)
# else
# define N 8
# define LL(c0,c1,c2,c3,c4,c5,c6,c7) c0,c1,c2,c3,c4,c5,c6,c7, \
c7,c0,c1,c2,c3,c4,c5,c6, \
c6,c7,c0,c1,c2,c3,c4,c5, \
c5,c6,c7,c0,c1,c2,c3,c4, \
c4,c5,c6,c7,c0,c1,c2,c3, \
c3,c4,c5,c6,c7,c0,c1,c2, \
c2,c3,c4,c5,c6,c7,c0,c1, \
c1,c2,c3,c4,c5,c6,c7,c0
# define C0(K,i) (Cx.q[0+8*K.c[(i)*8+0]])
# define C1(K,i) (Cx.q[1+8*K.c[(i)*8+1]])
# define C2(K,i) (Cx.q[2+8*K.c[(i)*8+2]])
# define C3(K,i) (Cx.q[3+8*K.c[(i)*8+3]])
# define C4(K,i) (Cx.q[4+8*K.c[(i)*8+4]])
# define C5(K,i) (Cx.q[5+8*K.c[(i)*8+5]])
# define C6(K,i) (Cx.q[6+8*K.c[(i)*8+6]])
# define C7(K,i) (Cx.q[7+8*K.c[(i)*8+7]])
# endif
#else
# define N 2
# define LL(c0,c1,c2,c3,c4,c5,c6,c7) c0,c1,c2,c3,c4,c5,c6,c7, \
c0,c1,c2,c3,c4,c5,c6,c7
# define C0(K,i) (((u64*)(Cx.c+0))[2*K.c[(i)*8+0]])
# define C1(K,i) (((u64*)(Cx.c+7))[2*K.c[(i)*8+1]])
# define C2(K,i) (((u64*)(Cx.c+6))[2*K.c[(i)*8+2]])
# define C3(K,i) (((u64*)(Cx.c+5))[2*K.c[(i)*8+3]])
# define C4(K,i) (((u64*)(Cx.c+4))[2*K.c[(i)*8+4]])
# define C5(K,i) (((u64*)(Cx.c+3))[2*K.c[(i)*8+5]])
# define C6(K,i) (((u64*)(Cx.c+2))[2*K.c[(i)*8+6]])
# define C7(K,i) (((u64*)(Cx.c+1))[2*K.c[(i)*8+7]])
#endif
static const
union {
u8 c[(256 * N + ROUNDS) * sizeof(u64)];
u64 q[(256 * N + ROUNDS)];
} Cx = {
{
/* Note endian-neutral representation:-) */
LL(0x18, 0x18, 0x60, 0x18, 0xc0, 0x78, 0x30, 0xd8),
LL(0x23, 0x23, 0x8c, 0x23, 0x05, 0xaf, 0x46, 0x26),
LL(0xc6, 0xc6, 0x3f, 0xc6, 0x7e, 0xf9, 0x91, 0xb8),
LL(0xe8, 0xe8, 0x87, 0xe8, 0x13, 0x6f, 0xcd, 0xfb),
LL(0x87, 0x87, 0x26, 0x87, 0x4c, 0xa1, 0x13, 0xcb),
LL(0xb8, 0xb8, 0xda, 0xb8, 0xa9, 0x62, 0x6d, 0x11),
LL(0x01, 0x01, 0x04, 0x01, 0x08, 0x05, 0x02, 0x09),
LL(0x4f, 0x4f, 0x21, 0x4f, 0x42, 0x6e, 0x9e, 0x0d),
LL(0x36, 0x36, 0xd8, 0x36, 0xad, 0xee, 0x6c, 0x9b),
LL(0xa6, 0xa6, 0xa2, 0xa6, 0x59, 0x04, 0x51, 0xff),
LL(0xd2, 0xd2, 0x6f, 0xd2, 0xde, 0xbd, 0xb9, 0x0c),
LL(0xf5, 0xf5, 0xf3, 0xf5, 0xfb, 0x06, 0xf7, 0x0e),
LL(0x79, 0x79, 0xf9, 0x79, 0xef, 0x80, 0xf2, 0x96),
LL(0x6f, 0x6f, 0xa1, 0x6f, 0x5f, 0xce, 0xde, 0x30),
LL(0x91, 0x91, 0x7e, 0x91, 0xfc, 0xef, 0x3f, 0x6d),
LL(0x52, 0x52, 0x55, 0x52, 0xaa, 0x07, 0xa4, 0xf8),
LL(0x60, 0x60, 0x9d, 0x60, 0x27, 0xfd, 0xc0, 0x47),
LL(0xbc, 0xbc, 0xca, 0xbc, 0x89, 0x76, 0x65, 0x35),
LL(0x9b, 0x9b, 0x56, 0x9b, 0xac, 0xcd, 0x2b, 0x37),
LL(0x8e, 0x8e, 0x02, 0x8e, 0x04, 0x8c, 0x01, 0x8a),
LL(0xa3, 0xa3, 0xb6, 0xa3, 0x71, 0x15, 0x5b, 0xd2),
LL(0x0c, 0x0c, 0x30, 0x0c, 0x60, 0x3c, 0x18, 0x6c),
LL(0x7b, 0x7b, 0xf1, 0x7b, 0xff, 0x8a, 0xf6, 0x84),
LL(0x35, 0x35, 0xd4, 0x35, 0xb5, 0xe1, 0x6a, 0x80),
LL(0x1d, 0x1d, 0x74, 0x1d, 0xe8, 0x69, 0x3a, 0xf5),
LL(0xe0, 0xe0, 0xa7, 0xe0, 0x53, 0x47, 0xdd, 0xb3),
LL(0xd7, 0xd7, 0x7b, 0xd7, 0xf6, 0xac, 0xb3, 0x21),
LL(0xc2, 0xc2, 0x2f, 0xc2, 0x5e, 0xed, 0x99, 0x9c),
LL(0x2e, 0x2e, 0xb8, 0x2e, 0x6d, 0x96, 0x5c, 0x43),
LL(0x4b, 0x4b, 0x31, 0x4b, 0x62, 0x7a, 0x96, 0x29),
LL(0xfe, 0xfe, 0xdf, 0xfe, 0xa3, 0x21, 0xe1, 0x5d),
LL(0x57, 0x57, 0x41, 0x57, 0x82, 0x16, 0xae, 0xd5),
LL(0x15, 0x15, 0x54, 0x15, 0xa8, 0x41, 0x2a, 0xbd),
LL(0x77, 0x77, 0xc1, 0x77, 0x9f, 0xb6, 0xee, 0xe8),
LL(0x37, 0x37, 0xdc, 0x37, 0xa5, 0xeb, 0x6e, 0x92),
LL(0xe5, 0xe5, 0xb3, 0xe5, 0x7b, 0x56, 0xd7, 0x9e),
LL(0x9f, 0x9f, 0x46, 0x9f, 0x8c, 0xd9, 0x23, 0x13),
LL(0xf0, 0xf0, 0xe7, 0xf0, 0xd3, 0x17, 0xfd, 0x23),
LL(0x4a, 0x4a, 0x35, 0x4a, 0x6a, 0x7f, 0x94, 0x20),
LL(0xda, 0xda, 0x4f, 0xda, 0x9e, 0x95, 0xa9, 0x44),
LL(0x58, 0x58, 0x7d, 0x58, 0xfa, 0x25, 0xb0, 0xa2),
LL(0xc9, 0xc9, 0x03, 0xc9, 0x06, 0xca, 0x8f, 0xcf),
LL(0x29, 0x29, 0xa4, 0x29, 0x55, 0x8d, 0x52, 0x7c),
LL(0x0a, 0x0a, 0x28, 0x0a, 0x50, 0x22, 0x14, 0x5a),
LL(0xb1, 0xb1, 0xfe, 0xb1, 0xe1, 0x4f, 0x7f, 0x50),
LL(0xa0, 0xa0, 0xba, 0xa0, 0x69, 0x1a, 0x5d, 0xc9),
LL(0x6b, 0x6b, 0xb1, 0x6b, 0x7f, 0xda, 0xd6, 0x14),
LL(0x85, 0x85, 0x2e, 0x85, 0x5c, 0xab, 0x17, 0xd9),
LL(0xbd, 0xbd, 0xce, 0xbd, 0x81, 0x73, 0x67, 0x3c),
LL(0x5d, 0x5d, 0x69, 0x5d, 0xd2, 0x34, 0xba, 0x8f),
LL(0x10, 0x10, 0x40, 0x10, 0x80, 0x50, 0x20, 0x90),
LL(0xf4, 0xf4, 0xf7, 0xf4, 0xf3, 0x03, 0xf5, 0x07),
LL(0xcb, 0xcb, 0x0b, 0xcb, 0x16, 0xc0, 0x8b, 0xdd),
LL(0x3e, 0x3e, 0xf8, 0x3e, 0xed, 0xc6, 0x7c, 0xd3),
LL(0x05, 0x05, 0x14, 0x05, 0x28, 0x11, 0x0a, 0x2d),
LL(0x67, 0x67, 0x81, 0x67, 0x1f, 0xe6, 0xce, 0x78),
LL(0xe4, 0xe4, 0xb7, 0xe4, 0x73, 0x53, 0xd5, 0x97),
LL(0x27, 0x27, 0x9c, 0x27, 0x25, 0xbb, 0x4e, 0x02),
LL(0x41, 0x41, 0x19, 0x41, 0x32, 0x58, 0x82, 0x73),
LL(0x8b, 0x8b, 0x16, 0x8b, 0x2c, 0x9d, 0x0b, 0xa7),
LL(0xa7, 0xa7, 0xa6, 0xa7, 0x51, 0x01, 0x53, 0xf6),
LL(0x7d, 0x7d, 0xe9, 0x7d, 0xcf, 0x94, 0xfa, 0xb2),
LL(0x95, 0x95, 0x6e, 0x95, 0xdc, 0xfb, 0x37, 0x49),
LL(0xd8, 0xd8, 0x47, 0xd8, 0x8e, 0x9f, 0xad, 0x56),
LL(0xfb, 0xfb, 0xcb, 0xfb, 0x8b, 0x30, 0xeb, 0x70),
LL(0xee, 0xee, 0x9f, 0xee, 0x23, 0x71, 0xc1, 0xcd),
LL(0x7c, 0x7c, 0xed, 0x7c, 0xc7, 0x91, 0xf8, 0xbb),
LL(0x66, 0x66, 0x85, 0x66, 0x17, 0xe3, 0xcc, 0x71),
LL(0xdd, 0xdd, 0x53, 0xdd, 0xa6, 0x8e, 0xa7, 0x7b),
LL(0x17, 0x17, 0x5c, 0x17, 0xb8, 0x4b, 0x2e, 0xaf),
LL(0x47, 0x47, 0x01, 0x47, 0x02, 0x46, 0x8e, 0x45),
LL(0x9e, 0x9e, 0x42, 0x9e, 0x84, 0xdc, 0x21, 0x1a),
LL(0xca, 0xca, 0x0f, 0xca, 0x1e, 0xc5, 0x89, 0xd4),
LL(0x2d, 0x2d, 0xb4, 0x2d, 0x75, 0x99, 0x5a, 0x58),
LL(0xbf, 0xbf, 0xc6, 0xbf, 0x91, 0x79, 0x63, 0x2e),
LL(0x07, 0x07, 0x1c, 0x07, 0x38, 0x1b, 0x0e, 0x3f),
LL(0xad, 0xad, 0x8e, 0xad, 0x01, 0x23, 0x47, 0xac),
LL(0x5a, 0x5a, 0x75, 0x5a, 0xea, 0x2f, 0xb4, 0xb0),
LL(0x83, 0x83, 0x36, 0x83, 0x6c, 0xb5, 0x1b, 0xef),
LL(0x33, 0x33, 0xcc, 0x33, 0x85, 0xff, 0x66, 0xb6),
LL(0x63, 0x63, 0x91, 0x63, 0x3f, 0xf2, 0xc6, 0x5c),
LL(0x02, 0x02, 0x08, 0x02, 0x10, 0x0a, 0x04, 0x12),
LL(0xaa, 0xaa, 0x92, 0xaa, 0x39, 0x38, 0x49, 0x93),
LL(0x71, 0x71, 0xd9, 0x71, 0xaf, 0xa8, 0xe2, 0xde),
LL(0xc8, 0xc8, 0x07, 0xc8, 0x0e, 0xcf, 0x8d, 0xc6),
LL(0x19, 0x19, 0x64, 0x19, 0xc8, 0x7d, 0x32, 0xd1),
LL(0x49, 0x49, 0x39, 0x49, 0x72, 0x70, 0x92, 0x3b),
LL(0xd9, 0xd9, 0x43, 0xd9, 0x86, 0x9a, 0xaf, 0x5f),
LL(0xf2, 0xf2, 0xef, 0xf2, 0xc3, 0x1d, 0xf9, 0x31),
LL(0xe3, 0xe3, 0xab, 0xe3, 0x4b, 0x48, 0xdb, 0xa8),
LL(0x5b, 0x5b, 0x71, 0x5b, 0xe2, 0x2a, 0xb6, 0xb9),
LL(0x88, 0x88, 0x1a, 0x88, 0x34, 0x92, 0x0d, 0xbc),
LL(0x9a, 0x9a, 0x52, 0x9a, 0xa4, 0xc8, 0x29, 0x3e),
LL(0x26, 0x26, 0x98, 0x26, 0x2d, 0xbe, 0x4c, 0x0b),
LL(0x32, 0x32, 0xc8, 0x32, 0x8d, 0xfa, 0x64, 0xbf),
LL(0xb0, 0xb0, 0xfa, 0xb0, 0xe9, 0x4a, 0x7d, 0x59),
LL(0xe9, 0xe9, 0x83, 0xe9, 0x1b, 0x6a, 0xcf, 0xf2),
LL(0x0f, 0x0f, 0x3c, 0x0f, 0x78, 0x33, 0x1e, 0x77),
LL(0xd5, 0xd5, 0x73, 0xd5, 0xe6, 0xa6, 0xb7, 0x33),
LL(0x80, 0x80, 0x3a, 0x80, 0x74, 0xba, 0x1d, 0xf4),
LL(0xbe, 0xbe, 0xc2, 0xbe, 0x99, 0x7c, 0x61, 0x27),
LL(0xcd, 0xcd, 0x13, 0xcd, 0x26, 0xde, 0x87, 0xeb),
LL(0x34, 0x34, 0xd0, 0x34, 0xbd, 0xe4, 0x68, 0x89),
LL(0x48, 0x48, 0x3d, 0x48, 0x7a, 0x75, 0x90, 0x32),
LL(0xff, 0xff, 0xdb, 0xff, 0xab, 0x24, 0xe3, 0x54),
LL(0x7a, 0x7a, 0xf5, 0x7a, 0xf7, 0x8f, 0xf4, 0x8d),
LL(0x90, 0x90, 0x7a, 0x90, 0xf4, 0xea, 0x3d, 0x64),
LL(0x5f, 0x5f, 0x61, 0x5f, 0xc2, 0x3e, 0xbe, 0x9d),
LL(0x20, 0x20, 0x80, 0x20, 0x1d, 0xa0, 0x40, 0x3d),
LL(0x68, 0x68, 0xbd, 0x68, 0x67, 0xd5, 0xd0, 0x0f),
LL(0x1a, 0x1a, 0x68, 0x1a, 0xd0, 0x72, 0x34, 0xca),
LL(0xae, 0xae, 0x82, 0xae, 0x19, 0x2c, 0x41, 0xb7),
LL(0xb4, 0xb4, 0xea, 0xb4, 0xc9, 0x5e, 0x75, 0x7d),
LL(0x54, 0x54, 0x4d, 0x54, 0x9a, 0x19, 0xa8, 0xce),
LL(0x93, 0x93, 0x76, 0x93, 0xec, 0xe5, 0x3b, 0x7f),
LL(0x22, 0x22, 0x88, 0x22, 0x0d, 0xaa, 0x44, 0x2f),
LL(0x64, 0x64, 0x8d, 0x64, 0x07, 0xe9, 0xc8, 0x63),
LL(0xf1, 0xf1, 0xe3, 0xf1, 0xdb, 0x12, 0xff, 0x2a),
LL(0x73, 0x73, 0xd1, 0x73, 0xbf, 0xa2, 0xe6, 0xcc),
LL(0x12, 0x12, 0x48, 0x12, 0x90, 0x5a, 0x24, 0x82),
LL(0x40, 0x40, 0x1d, 0x40, 0x3a, 0x5d, 0x80, 0x7a),
LL(0x08, 0x08, 0x20, 0x08, 0x40, 0x28, 0x10, 0x48),
LL(0xc3, 0xc3, 0x2b, 0xc3, 0x56, 0xe8, 0x9b, 0x95),
LL(0xec, 0xec, 0x97, 0xec, 0x33, 0x7b, 0xc5, 0xdf),
LL(0xdb, 0xdb, 0x4b, 0xdb, 0x96, 0x90, 0xab, 0x4d),
LL(0xa1, 0xa1, 0xbe, 0xa1, 0x61, 0x1f, 0x5f, 0xc0),
LL(0x8d, 0x8d, 0x0e, 0x8d, 0x1c, 0x83, 0x07, 0x91),
LL(0x3d, 0x3d, 0xf4, 0x3d, 0xf5, 0xc9, 0x7a, 0xc8),
LL(0x97, 0x97, 0x66, 0x97, 0xcc, 0xf1, 0x33, 0x5b),
LL(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
LL(0xcf, 0xcf, 0x1b, 0xcf, 0x36, 0xd4, 0x83, 0xf9),
LL(0x2b, 0x2b, 0xac, 0x2b, 0x45, 0x87, 0x56, 0x6e),
LL(0x76, 0x76, 0xc5, 0x76, 0x97, 0xb3, 0xec, 0xe1),
LL(0x82, 0x82, 0x32, 0x82, 0x64, 0xb0, 0x19, 0xe6),
LL(0xd6, 0xd6, 0x7f, 0xd6, 0xfe, 0xa9, 0xb1, 0x28),
LL(0x1b, 0x1b, 0x6c, 0x1b, 0xd8, 0x77, 0x36, 0xc3),
LL(0xb5, 0xb5, 0xee, 0xb5, 0xc1, 0x5b, 0x77, 0x74),
LL(0xaf, 0xaf, 0x86, 0xaf, 0x11, 0x29, 0x43, 0xbe),
LL(0x6a, 0x6a, 0xb5, 0x6a, 0x77, 0xdf, 0xd4, 0x1d),
LL(0x50, 0x50, 0x5d, 0x50, 0xba, 0x0d, 0xa0, 0xea),
LL(0x45, 0x45, 0x09, 0x45, 0x12, 0x4c, 0x8a, 0x57),
LL(0xf3, 0xf3, 0xeb, 0xf3, 0xcb, 0x18, 0xfb, 0x38),
LL(0x30, 0x30, 0xc0, 0x30, 0x9d, 0xf0, 0x60, 0xad),
LL(0xef, 0xef, 0x9b, 0xef, 0x2b, 0x74, 0xc3, 0xc4),
LL(0x3f, 0x3f, 0xfc, 0x3f, 0xe5, 0xc3, 0x7e, 0xda),
LL(0x55, 0x55, 0x49, 0x55, 0x92, 0x1c, 0xaa, 0xc7),
LL(0xa2, 0xa2, 0xb2, 0xa2, 0x79, 0x10, 0x59, 0xdb),
LL(0xea, 0xea, 0x8f, 0xea, 0x03, 0x65, 0xc9, 0xe9),
LL(0x65, 0x65, 0x89, 0x65, 0x0f, 0xec, 0xca, 0x6a),
LL(0xba, 0xba, 0xd2, 0xba, 0xb9, 0x68, 0x69, 0x03),
LL(0x2f, 0x2f, 0xbc, 0x2f, 0x65, 0x93, 0x5e, 0x4a),
LL(0xc0, 0xc0, 0x27, 0xc0, 0x4e, 0xe7, 0x9d, 0x8e),
LL(0xde, 0xde, 0x5f, 0xde, 0xbe, 0x81, 0xa1, 0x60),
LL(0x1c, 0x1c, 0x70, 0x1c, 0xe0, 0x6c, 0x38, 0xfc),
LL(0xfd, 0xfd, 0xd3, 0xfd, 0xbb, 0x2e, 0xe7, 0x46),
LL(0x4d, 0x4d, 0x29, 0x4d, 0x52, 0x64, 0x9a, 0x1f),
LL(0x92, 0x92, 0x72, 0x92, 0xe4, 0xe0, 0x39, 0x76),
LL(0x75, 0x75, 0xc9, 0x75, 0x8f, 0xbc, 0xea, 0xfa),
LL(0x06, 0x06, 0x18, 0x06, 0x30, 0x1e, 0x0c, 0x36),
LL(0x8a, 0x8a, 0x12, 0x8a, 0x24, 0x98, 0x09, 0xae),
LL(0xb2, 0xb2, 0xf2, 0xb2, 0xf9, 0x40, 0x79, 0x4b),
LL(0xe6, 0xe6, 0xbf, 0xe6, 0x63, 0x59, 0xd1, 0x85),
LL(0x0e, 0x0e, 0x38, 0x0e, 0x70, 0x36, 0x1c, 0x7e),
LL(0x1f, 0x1f, 0x7c, 0x1f, 0xf8, 0x63, 0x3e, 0xe7),
LL(0x62, 0x62, 0x95, 0x62, 0x37, 0xf7, 0xc4, 0x55),
LL(0xd4, 0xd4, 0x77, 0xd4, 0xee, 0xa3, 0xb5, 0x3a),
LL(0xa8, 0xa8, 0x9a, 0xa8, 0x29, 0x32, 0x4d, 0x81),
LL(0x96, 0x96, 0x62, 0x96, 0xc4, 0xf4, 0x31, 0x52),
LL(0xf9, 0xf9, 0xc3, 0xf9, 0x9b, 0x3a, 0xef, 0x62),
LL(0xc5, 0xc5, 0x33, 0xc5, 0x66, 0xf6, 0x97, 0xa3),
LL(0x25, 0x25, 0x94, 0x25, 0x35, 0xb1, 0x4a, 0x10),
LL(0x59, 0x59, 0x79, 0x59, 0xf2, 0x20, 0xb2, 0xab),
LL(0x84, 0x84, 0x2a, 0x84, 0x54, 0xae, 0x15, 0xd0),
LL(0x72, 0x72, 0xd5, 0x72, 0xb7, 0xa7, 0xe4, 0xc5),
LL(0x39, 0x39, 0xe4, 0x39, 0xd5, 0xdd, 0x72, 0xec),
LL(0x4c, 0x4c, 0x2d, 0x4c, 0x5a, 0x61, 0x98, 0x16),
LL(0x5e, 0x5e, 0x65, 0x5e, 0xca, 0x3b, 0xbc, 0x94),
LL(0x78, 0x78, 0xfd, 0x78, 0xe7, 0x85, 0xf0, 0x9f),
LL(0x38, 0x38, 0xe0, 0x38, 0xdd, 0xd8, 0x70, 0xe5),
LL(0x8c, 0x8c, 0x0a, 0x8c, 0x14, 0x86, 0x05, 0x98),
LL(0xd1, 0xd1, 0x63, 0xd1, 0xc6, 0xb2, 0xbf, 0x17),
LL(0xa5, 0xa5, 0xae, 0xa5, 0x41, 0x0b, 0x57, 0xe4),
LL(0xe2, 0xe2, 0xaf, 0xe2, 0x43, 0x4d, 0xd9, 0xa1),
LL(0x61, 0x61, 0x99, 0x61, 0x2f, 0xf8, 0xc2, 0x4e),
LL(0xb3, 0xb3, 0xf6, 0xb3, 0xf1, 0x45, 0x7b, 0x42),
LL(0x21, 0x21, 0x84, 0x21, 0x15, 0xa5, 0x42, 0x34),
LL(0x9c, 0x9c, 0x4a, 0x9c, 0x94, 0xd6, 0x25, 0x08),
LL(0x1e, 0x1e, 0x78, 0x1e, 0xf0, 0x66, 0x3c, 0xee),
LL(0x43, 0x43, 0x11, 0x43, 0x22, 0x52, 0x86, 0x61),
LL(0xc7, 0xc7, 0x3b, 0xc7, 0x76, 0xfc, 0x93, 0xb1),
LL(0xfc, 0xfc, 0xd7, 0xfc, 0xb3, 0x2b, 0xe5, 0x4f),
LL(0x04, 0x04, 0x10, 0x04, 0x20, 0x14, 0x08, 0x24),
LL(0x51, 0x51, 0x59, 0x51, 0xb2, 0x08, 0xa2, 0xe3),
LL(0x99, 0x99, 0x5e, 0x99, 0xbc, 0xc7, 0x2f, 0x25),
LL(0x6d, 0x6d, 0xa9, 0x6d, 0x4f, 0xc4, 0xda, 0x22),
LL(0x0d, 0x0d, 0x34, 0x0d, 0x68, 0x39, 0x1a, 0x65),
LL(0xfa, 0xfa, 0xcf, 0xfa, 0x83, 0x35, 0xe9, 0x79),
LL(0xdf, 0xdf, 0x5b, 0xdf, 0xb6, 0x84, 0xa3, 0x69),
LL(0x7e, 0x7e, 0xe5, 0x7e, 0xd7, 0x9b, 0xfc, 0xa9),
LL(0x24, 0x24, 0x90, 0x24, 0x3d, 0xb4, 0x48, 0x19),
LL(0x3b, 0x3b, 0xec, 0x3b, 0xc5, 0xd7, 0x76, 0xfe),
LL(0xab, 0xab, 0x96, 0xab, 0x31, 0x3d, 0x4b, 0x9a),
LL(0xce, 0xce, 0x1f, 0xce, 0x3e, 0xd1, 0x81, 0xf0),
LL(0x11, 0x11, 0x44, 0x11, 0x88, 0x55, 0x22, 0x99),
LL(0x8f, 0x8f, 0x06, 0x8f, 0x0c, 0x89, 0x03, 0x83),
LL(0x4e, 0x4e, 0x25, 0x4e, 0x4a, 0x6b, 0x9c, 0x04),
LL(0xb7, 0xb7, 0xe6, 0xb7, 0xd1, 0x51, 0x73, 0x66),
LL(0xeb, 0xeb, 0x8b, 0xeb, 0x0b, 0x60, 0xcb, 0xe0),
LL(0x3c, 0x3c, 0xf0, 0x3c, 0xfd, 0xcc, 0x78, 0xc1),
LL(0x81, 0x81, 0x3e, 0x81, 0x7c, 0xbf, 0x1f, 0xfd),
LL(0x94, 0x94, 0x6a, 0x94, 0xd4, 0xfe, 0x35, 0x40),
LL(0xf7, 0xf7, 0xfb, 0xf7, 0xeb, 0x0c, 0xf3, 0x1c),
LL(0xb9, 0xb9, 0xde, 0xb9, 0xa1, 0x67, 0x6f, 0x18),
LL(0x13, 0x13, 0x4c, 0x13, 0x98, 0x5f, 0x26, 0x8b),
LL(0x2c, 0x2c, 0xb0, 0x2c, 0x7d, 0x9c, 0x58, 0x51),
LL(0xd3, 0xd3, 0x6b, 0xd3, 0xd6, 0xb8, 0xbb, 0x05),
LL(0xe7, 0xe7, 0xbb, 0xe7, 0x6b, 0x5c, 0xd3, 0x8c),
LL(0x6e, 0x6e, 0xa5, 0x6e, 0x57, 0xcb, 0xdc, 0x39),
LL(0xc4, 0xc4, 0x37, 0xc4, 0x6e, 0xf3, 0x95, 0xaa),
LL(0x03, 0x03, 0x0c, 0x03, 0x18, 0x0f, 0x06, 0x1b),
LL(0x56, 0x56, 0x45, 0x56, 0x8a, 0x13, 0xac, 0xdc),
LL(0x44, 0x44, 0x0d, 0x44, 0x1a, 0x49, 0x88, 0x5e),
LL(0x7f, 0x7f, 0xe1, 0x7f, 0xdf, 0x9e, 0xfe, 0xa0),
LL(0xa9, 0xa9, 0x9e, 0xa9, 0x21, 0x37, 0x4f, 0x88),
LL(0x2a, 0x2a, 0xa8, 0x2a, 0x4d, 0x82, 0x54, 0x67),
LL(0xbb, 0xbb, 0xd6, 0xbb, 0xb1, 0x6d, 0x6b, 0x0a),
LL(0xc1, 0xc1, 0x23, 0xc1, 0x46, 0xe2, 0x9f, 0x87),
LL(0x53, 0x53, 0x51, 0x53, 0xa2, 0x02, 0xa6, 0xf1),
LL(0xdc, 0xdc, 0x57, 0xdc, 0xae, 0x8b, 0xa5, 0x72),
LL(0x0b, 0x0b, 0x2c, 0x0b, 0x58, 0x27, 0x16, 0x53),
LL(0x9d, 0x9d, 0x4e, 0x9d, 0x9c, 0xd3, 0x27, 0x01),
LL(0x6c, 0x6c, 0xad, 0x6c, 0x47, 0xc1, 0xd8, 0x2b),
LL(0x31, 0x31, 0xc4, 0x31, 0x95, 0xf5, 0x62, 0xa4),
LL(0x74, 0x74, 0xcd, 0x74, 0x87, 0xb9, 0xe8, 0xf3),
LL(0xf6, 0xf6, 0xff, 0xf6, 0xe3, 0x09, 0xf1, 0x15),
LL(0x46, 0x46, 0x05, 0x46, 0x0a, 0x43, 0x8c, 0x4c),
LL(0xac, 0xac, 0x8a, 0xac, 0x09, 0x26, 0x45, 0xa5),
LL(0x89, 0x89, 0x1e, 0x89, 0x3c, 0x97, 0x0f, 0xb5),
LL(0x14, 0x14, 0x50, 0x14, 0xa0, 0x44, 0x28, 0xb4),
LL(0xe1, 0xe1, 0xa3, 0xe1, 0x5b, 0x42, 0xdf, 0xba),
LL(0x16, 0x16, 0x58, 0x16, 0xb0, 0x4e, 0x2c, 0xa6),
LL(0x3a, 0x3a, 0xe8, 0x3a, 0xcd, 0xd2, 0x74, 0xf7),
LL(0x69, 0x69, 0xb9, 0x69, 0x6f, 0xd0, 0xd2, 0x06),
LL(0x09, 0x09, 0x24, 0x09, 0x48, 0x2d, 0x12, 0x41),
LL(0x70, 0x70, 0xdd, 0x70, 0xa7, 0xad, 0xe0, 0xd7),
LL(0xb6, 0xb6, 0xe2, 0xb6, 0xd9, 0x54, 0x71, 0x6f),
LL(0xd0, 0xd0, 0x67, 0xd0, 0xce, 0xb7, 0xbd, 0x1e),
LL(0xed, 0xed, 0x93, 0xed, 0x3b, 0x7e, 0xc7, 0xd6),
LL(0xcc, 0xcc, 0x17, 0xcc, 0x2e, 0xdb, 0x85, 0xe2),
LL(0x42, 0x42, 0x15, 0x42, 0x2a, 0x57, 0x84, 0x68),
LL(0x98, 0x98, 0x5a, 0x98, 0xb4, 0xc2, 0x2d, 0x2c),
LL(0xa4, 0xa4, 0xaa, 0xa4, 0x49, 0x0e, 0x55, 0xed),
LL(0x28, 0x28, 0xa0, 0x28, 0x5d, 0x88, 0x50, 0x75),
LL(0x5c, 0x5c, 0x6d, 0x5c, 0xda, 0x31, 0xb8, 0x86),
LL(0xf8, 0xf8, 0xc7, 0xf8, 0x93, 0x3f, 0xed, 0x6b),
LL(0x86, 0x86, 0x22, 0x86, 0x44, 0xa4, 0x11, 0xc2),
#define RC (&(Cx.q[256*N]))
0x18, 0x23, 0xc6, 0xe8, 0x87, 0xb8, 0x01, 0x4f,
/* rc[ROUNDS] */
0x36, 0xa6, 0xd2, 0xf5, 0x79, 0x6f, 0x91, 0x52, 0x60, 0xbc, 0x9b,
0x8e, 0xa3, 0x0c, 0x7b, 0x35, 0x1d, 0xe0, 0xd7, 0xc2, 0x2e, 0x4b,
0xfe, 0x57, 0x15, 0x77, 0x37, 0xe5, 0x9f, 0xf0, 0x4a, 0xda, 0x58,
0xc9, 0x29, 0x0a, 0xb1, 0xa0, 0x6b, 0x85, 0xbd, 0x5d, 0x10, 0xf4,
0xcb, 0x3e, 0x05, 0x67, 0xe4, 0x27, 0x41, 0x8b, 0xa7, 0x7d, 0x95,
0xd8, 0xfb, 0xee, 0x7c, 0x66, 0xdd, 0x17, 0x47, 0x9e, 0xca, 0x2d,
0xbf, 0x07, 0xad, 0x5a, 0x83, 0x33
}
};
/*
 * Core WHIRLPOOL compression function: absorbs n 64-byte message blocks
 * starting at 'inp' into the 512-bit chaining state ctx->H.
 *
 * NOTE(review): caller must guarantee n >= 1 — the do/while(--n) loop
 * body always runs at least once; n == 0 would wrap around.
 *
 * S is the cipher state, K the round key, H aliases the hash state in
 * ctx. C0..C7 / RC / ROUNDS are table-lookup macros defined earlier in
 * this file; each round applies them per the Whirlpool W block cipher.
 */
void whirlpool_block(WHIRLPOOL_CTX *ctx, const void *inp, size_t n)
{
    int r;
    const u8 *p = inp;
    union {
        u64 q[8];
        u8 c[64];
    } S, K, *H = (void *)ctx->H.q;

#ifdef GO_FOR_MMX
    /* Hand off the whole job to the MMX assembler path when built in. */
    GO_FOR_MMX(ctx, inp, n);
#endif
    do {
#ifdef OPENSSL_SMALL_FOOTPRINT
        /* Compact variant: loop over the 8 state words instead of
         * fully unrolling; slower but much smaller code. */
        u64 L[8];
        int i;

        /* Key = chaining value; state = chaining value XOR message. */
        for (i = 0; i < 64; i++)
            S.c[i] = (K.c[i] = H->c[i]) ^ p[i];
        for (r = 0; r < ROUNDS; r++) {
            /* Key schedule: round constant only enters word 0. */
            for (i = 0; i < 8; i++) {
                L[i] = i ? 0 : RC[r];
                L[i] ^= C0(K, i) ^ C1(K, (i - 1) & 7) ^
                    C2(K, (i - 2) & 7) ^ C3(K, (i - 3) & 7) ^
                    C4(K, (i - 4) & 7) ^ C5(K, (i - 5) & 7) ^
                    C6(K, (i - 6) & 7) ^ C7(K, (i - 7) & 7);
            }
            memcpy(K.q, L, 64);
            /* Round transform of the cipher state under the new key. */
            for (i = 0; i < 8; i++) {
                L[i] ^= C0(S, i) ^ C1(S, (i - 1) & 7) ^
                    C2(S, (i - 2) & 7) ^ C3(S, (i - 3) & 7) ^
                    C4(S, (i - 4) & 7) ^ C5(S, (i - 5) & 7) ^
                    C6(S, (i - 6) & 7) ^ C7(S, (i - 7) & 7);
            }
            memcpy(S.q, L, 64);
        }
        /* Miyaguchi-Preneel feed-forward: H ^= E(H, m) ^ m. */
        for (i = 0; i < 64; i++)
            H->c[i] ^= S.c[i] ^ p[i];
#else
        u64 L0, L1, L2, L3, L4, L5, L6, L7;

# ifdef STRICT_ALIGNMENT
        /* Byte-copy the block when p is not 8-byte aligned. */
        if ((size_t)p & 7) {
            memcpy(S.c, p, 64);
            S.q[0] ^= (K.q[0] = H->q[0]);
            S.q[1] ^= (K.q[1] = H->q[1]);
            S.q[2] ^= (K.q[2] = H->q[2]);
            S.q[3] ^= (K.q[3] = H->q[3]);
            S.q[4] ^= (K.q[4] = H->q[4]);
            S.q[5] ^= (K.q[5] = H->q[5]);
            S.q[6] ^= (K.q[6] = H->q[6]);
            S.q[7] ^= (K.q[7] = H->q[7]);
        } else
# endif
        {
            /* Aligned fast path: read message words directly. */
            const u64 *pa = (const u64 *)p;
            S.q[0] = (K.q[0] = H->q[0]) ^ pa[0];
            S.q[1] = (K.q[1] = H->q[1]) ^ pa[1];
            S.q[2] = (K.q[2] = H->q[2]) ^ pa[2];
            S.q[3] = (K.q[3] = H->q[3]) ^ pa[3];
            S.q[4] = (K.q[4] = H->q[4]) ^ pa[4];
            S.q[5] = (K.q[5] = H->q[5]) ^ pa[5];
            S.q[6] = (K.q[6] = H->q[6]) ^ pa[6];
            S.q[7] = (K.q[7] = H->q[7]) ^ pa[7];
        }
        for (r = 0; r < ROUNDS; r++) {
# ifdef SMALL_REGISTER_BANK
            /* Variant for CPUs with few registers: compute each output
             * word as one full 8-term expression. Key schedule first. */
            L0 = C0(K, 0) ^ C1(K, 7) ^ C2(K, 6) ^ C3(K, 5) ^
                C4(K, 4) ^ C5(K, 3) ^ C6(K, 2) ^ C7(K, 1) ^ RC[r];
            L1 = C0(K, 1) ^ C1(K, 0) ^ C2(K, 7) ^ C3(K, 6) ^
                C4(K, 5) ^ C5(K, 4) ^ C6(K, 3) ^ C7(K, 2);
            L2 = C0(K, 2) ^ C1(K, 1) ^ C2(K, 0) ^ C3(K, 7) ^
                C4(K, 6) ^ C5(K, 5) ^ C6(K, 4) ^ C7(K, 3);
            L3 = C0(K, 3) ^ C1(K, 2) ^ C2(K, 1) ^ C3(K, 0) ^
                C4(K, 7) ^ C5(K, 6) ^ C6(K, 5) ^ C7(K, 4);
            L4 = C0(K, 4) ^ C1(K, 3) ^ C2(K, 2) ^ C3(K, 1) ^
                C4(K, 0) ^ C5(K, 7) ^ C6(K, 6) ^ C7(K, 5);
            L5 = C0(K, 5) ^ C1(K, 4) ^ C2(K, 3) ^ C3(K, 2) ^
                C4(K, 1) ^ C5(K, 0) ^ C6(K, 7) ^ C7(K, 6);
            L6 = C0(K, 6) ^ C1(K, 5) ^ C2(K, 4) ^ C3(K, 3) ^
                C4(K, 2) ^ C5(K, 1) ^ C6(K, 0) ^ C7(K, 7);
            L7 = C0(K, 7) ^ C1(K, 6) ^ C2(K, 5) ^ C3(K, 4) ^
                C4(K, 3) ^ C5(K, 2) ^ C6(K, 1) ^ C7(K, 0);
            K.q[0] = L0;
            K.q[1] = L1;
            K.q[2] = L2;
            K.q[3] = L3;
            K.q[4] = L4;
            K.q[5] = L5;
            K.q[6] = L6;
            K.q[7] = L7;
            /* Cipher state round under the freshly derived key. */
            L0 ^= C0(S, 0) ^ C1(S, 7) ^ C2(S, 6) ^ C3(S, 5) ^
                C4(S, 4) ^ C5(S, 3) ^ C6(S, 2) ^ C7(S, 1);
            L1 ^= C0(S, 1) ^ C1(S, 0) ^ C2(S, 7) ^ C3(S, 6) ^
                C4(S, 5) ^ C5(S, 4) ^ C6(S, 3) ^ C7(S, 2);
            L2 ^= C0(S, 2) ^ C1(S, 1) ^ C2(S, 0) ^ C3(S, 7) ^
                C4(S, 6) ^ C5(S, 5) ^ C6(S, 4) ^ C7(S, 3);
            L3 ^= C0(S, 3) ^ C1(S, 2) ^ C2(S, 1) ^ C3(S, 0) ^
                C4(S, 7) ^ C5(S, 6) ^ C6(S, 5) ^ C7(S, 4);
            L4 ^= C0(S, 4) ^ C1(S, 3) ^ C2(S, 2) ^ C3(S, 1) ^
                C4(S, 0) ^ C5(S, 7) ^ C6(S, 6) ^ C7(S, 5);
            L5 ^= C0(S, 5) ^ C1(S, 4) ^ C2(S, 3) ^ C3(S, 2) ^
                C4(S, 1) ^ C5(S, 0) ^ C6(S, 7) ^ C7(S, 6);
            L6 ^= C0(S, 6) ^ C1(S, 5) ^ C2(S, 4) ^ C3(S, 3) ^
                C4(S, 2) ^ C5(S, 1) ^ C6(S, 0) ^ C7(S, 7);
            L7 ^= C0(S, 7) ^ C1(S, 6) ^ C2(S, 5) ^ C3(S, 4) ^
                C4(S, 3) ^ C5(S, 2) ^ C6(S, 1) ^ C7(S, 0);
            S.q[0] = L0;
            S.q[1] = L1;
            S.q[2] = L2;
            S.q[3] = L3;
            S.q[4] = L4;
            S.q[5] = L5;
            S.q[6] = L6;
            S.q[7] = L7;
# else
            /* Default variant: accumulate column-by-column so each
             * table lookup feeds all eight L registers in turn.
             * Key schedule over K first; RC[r] only enters L0. */
            L0 = C0(K, 0);
            L1 = C1(K, 0);
            L2 = C2(K, 0);
            L3 = C3(K, 0);
            L4 = C4(K, 0);
            L5 = C5(K, 0);
            L6 = C6(K, 0);
            L7 = C7(K, 0);
            L0 ^= RC[r];

            L1 ^= C0(K, 1);
            L2 ^= C1(K, 1);
            L3 ^= C2(K, 1);
            L4 ^= C3(K, 1);
            L5 ^= C4(K, 1);
            L6 ^= C5(K, 1);
            L7 ^= C6(K, 1);
            L0 ^= C7(K, 1);

            L2 ^= C0(K, 2);
            L3 ^= C1(K, 2);
            L4 ^= C2(K, 2);
            L5 ^= C3(K, 2);
            L6 ^= C4(K, 2);
            L7 ^= C5(K, 2);
            L0 ^= C6(K, 2);
            L1 ^= C7(K, 2);

            L3 ^= C0(K, 3);
            L4 ^= C1(K, 3);
            L5 ^= C2(K, 3);
            L6 ^= C3(K, 3);
            L7 ^= C4(K, 3);
            L0 ^= C5(K, 3);
            L1 ^= C6(K, 3);
            L2 ^= C7(K, 3);

            L4 ^= C0(K, 4);
            L5 ^= C1(K, 4);
            L6 ^= C2(K, 4);
            L7 ^= C3(K, 4);
            L0 ^= C4(K, 4);
            L1 ^= C5(K, 4);
            L2 ^= C6(K, 4);
            L3 ^= C7(K, 4);

            L5 ^= C0(K, 5);
            L6 ^= C1(K, 5);
            L7 ^= C2(K, 5);
            L0 ^= C3(K, 5);
            L1 ^= C4(K, 5);
            L2 ^= C5(K, 5);
            L3 ^= C6(K, 5);
            L4 ^= C7(K, 5);

            L6 ^= C0(K, 6);
            L7 ^= C1(K, 6);
            L0 ^= C2(K, 6);
            L1 ^= C3(K, 6);
            L2 ^= C4(K, 6);
            L3 ^= C5(K, 6);
            L4 ^= C6(K, 6);
            L5 ^= C7(K, 6);

            L7 ^= C0(K, 7);
            L0 ^= C1(K, 7);
            L1 ^= C2(K, 7);
            L2 ^= C3(K, 7);
            L3 ^= C4(K, 7);
            L4 ^= C5(K, 7);
            L5 ^= C6(K, 7);
            L6 ^= C7(K, 7);
            K.q[0] = L0;
            K.q[1] = L1;
            K.q[2] = L2;
            K.q[3] = L3;
            K.q[4] = L4;
            K.q[5] = L5;
            K.q[6] = L6;
            K.q[7] = L7;
            /* Same column-wise accumulation for the cipher state S. */
            L0 ^= C0(S, 0);
            L1 ^= C1(S, 0);
            L2 ^= C2(S, 0);
            L3 ^= C3(S, 0);
            L4 ^= C4(S, 0);
            L5 ^= C5(S, 0);
            L6 ^= C6(S, 0);
            L7 ^= C7(S, 0);

            L1 ^= C0(S, 1);
            L2 ^= C1(S, 1);
            L3 ^= C2(S, 1);
            L4 ^= C3(S, 1);
            L5 ^= C4(S, 1);
            L6 ^= C5(S, 1);
            L7 ^= C6(S, 1);
            L0 ^= C7(S, 1);

            L2 ^= C0(S, 2);
            L3 ^= C1(S, 2);
            L4 ^= C2(S, 2);
            L5 ^= C3(S, 2);
            L6 ^= C4(S, 2);
            L7 ^= C5(S, 2);
            L0 ^= C6(S, 2);
            L1 ^= C7(S, 2);

            L3 ^= C0(S, 3);
            L4 ^= C1(S, 3);
            L5 ^= C2(S, 3);
            L6 ^= C3(S, 3);
            L7 ^= C4(S, 3);
            L0 ^= C5(S, 3);
            L1 ^= C6(S, 3);
            L2 ^= C7(S, 3);

            L4 ^= C0(S, 4);
            L5 ^= C1(S, 4);
            L6 ^= C2(S, 4);
            L7 ^= C3(S, 4);
            L0 ^= C4(S, 4);
            L1 ^= C5(S, 4);
            L2 ^= C6(S, 4);
            L3 ^= C7(S, 4);

            L5 ^= C0(S, 5);
            L6 ^= C1(S, 5);
            L7 ^= C2(S, 5);
            L0 ^= C3(S, 5);
            L1 ^= C4(S, 5);
            L2 ^= C5(S, 5);
            L3 ^= C6(S, 5);
            L4 ^= C7(S, 5);

            L6 ^= C0(S, 6);
            L7 ^= C1(S, 6);
            L0 ^= C2(S, 6);
            L1 ^= C3(S, 6);
            L2 ^= C4(S, 6);
            L3 ^= C5(S, 6);
            L4 ^= C6(S, 6);
            L5 ^= C7(S, 6);

            L7 ^= C0(S, 7);
            L0 ^= C1(S, 7);
            L1 ^= C2(S, 7);
            L2 ^= C3(S, 7);
            L3 ^= C4(S, 7);
            L4 ^= C5(S, 7);
            L5 ^= C6(S, 7);
            L6 ^= C7(S, 7);
            S.q[0] = L0;
            S.q[1] = L1;
            S.q[2] = L2;
            S.q[3] = L3;
            S.q[4] = L4;
            S.q[5] = L5;
            S.q[6] = L6;
            S.q[7] = L7;
# endif
        }
        /* Miyaguchi-Preneel feed-forward: H ^= E(H, m) ^ m. */
# ifdef STRICT_ALIGNMENT
        if ((size_t)p & 7) {
            int i;
            for (i = 0; i < 64; i++)
                H->c[i] ^= S.c[i] ^ p[i];
        } else
# endif
        {
            const u64 *pa = (const u64 *)p;
            H->q[0] ^= S.q[0] ^ pa[0];
            H->q[1] ^= S.q[1] ^ pa[1];
            H->q[2] ^= S.q[2] ^ pa[2];
            H->q[3] ^= S.q[3] ^ pa[3];
            H->q[4] ^= S.q[4] ^ pa[4];
            H->q[5] ^= S.q[5] ^ pa[5];
            H->q[6] ^= S.q[6] ^ pa[6];
            H->q[7] ^= S.q[7] ^ pa[7];
        }
#endif
        p += 64;
    } while (--n);
}
| ibc/MediaSoup | worker/deps/openssl/openssl/crypto/whrlpool/wp_block.c | C | isc | 34,797 |
/* Copyright (c) 2016, 2021 Dennis Wölfing
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* libc/src/stdio/printf.c
* Print format.
*/
#include <stdarg.h>
#include <stdio.h>
/* Formatted output to stdout; forwards the variadic arguments to
 * vfprintf. Returns the number of bytes written, or a negative value
 * on error (same contract as vfprintf). */
int printf(const char* restrict format, ...) {
    va_list args;
    int count;

    va_start(args, format);
    count = vfprintf(stdout, format, args);
    va_end(args);

    return count;
}
| dennis95/dennix | libc/src/stdio/printf.c | C | isc | 1,043 |
/*-
* builtin.c
* This file is part of libmetha
*
* Copyright (c) 2008, Emil Romanus <emil.romanus@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* http://bithack.se/projects/methabot/
*/
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/stat.h>
#include "errors.h"
#include "ftpparse.h"
#include "worker.h"
#include "urlengine.h"
#include "io.h"
#include "builtin.h"
/**
 * Builtin parsers except for the html parser which is in html.c
 **/

/* URL schemes recognized by lm_extract_text_urls; 'len' caches
 * strlen(name) so the scanner can compare backwards from "://". */
struct {
    const char *name;
    int len;
} protocols[] = {
    {"http", 4},
    {"ftp", 3},
};
/**
 * Default CSS parser.
 *
 * Thin wrapper: scans the downloaded buffer for url(...) references
 * and queues them on the URL-engine handle. 'w', 'url' and 'al' are
 * unused here but required by the parser callback signature.
 **/
M_CODE
lm_parser_css(worker_t *w, iobuf_t *buf, uehandle_t *ue_h,
              url_t *url, attr_list_t *al)
{
    return lm_extract_css_urls(ue_h, buf->ptr, buf->sz);
}
/**
 * download the data to a local file instead of
 * to memory
 *
 * the parser chain will receive the file name in
 * this.data instead of the real buffer.
 *
 * Fixes vs. previous revision:
 * - 'ext' was leaked when re-assigned via strdup(".html") and on the
 *   malloc-failure return paths; it is now freed before both.
 * - removed the unused local 's'.
 **/
M_CODE
lm_handler_writefile(worker_t *w, iohandle_t *h,
                     url_t *url)
{
    int r;
    char *name;
    char *ext;
    int x;
    int ext_offs;
    int a_sz;
    int sz;
    struct stat st;

    /**
     * create a filename to download to
     **/
    if (url->ext_o) {
        /* copy the extension, stopping at any query string */
        for (x = url->ext_o; *(url->str+x) && *(url->str+x) != '?'; x++)
            ;
        if (!(ext = malloc(x-url->ext_o+1)))
            return M_OUT_OF_MEM;
        memcpy(ext, url->str+url->ext_o, x-url->ext_o);
        ext[x-url->ext_o] = '\0';

        ext_offs = url->ext_o-(url->file_o+1);
    } else {
        /* no extension; base-name length runs up to '?' or NUL */
        ext = strdup("");
        for (x = url->file_o+1; *(url->str+x) && *(url->str+x) != '?'; x++)
            ;
        ext_offs = x-(url->file_o+1);
    }

    if (url->file_o+1 == url->sz) {
        /* URL ends in '/': synthesize "index.html" */
        if (!(name = malloc(a_sz = sizeof("index.html")+32))) {
            free(ext);
            return M_OUT_OF_MEM;
        }
        memcpy(name, "index.html", sizeof("index.html"));
        ext_offs = strlen("index");
        free(ext); /* was leaked here before */
        ext = strdup(".html");
    } else {
        /* +32 slack leaves room for the "-<counter>" uniquifier below */
        if (!(name = malloc(a_sz = ext_offs+strlen(ext)+1+32))) {
            free(ext);
            return M_OUT_OF_MEM;
        }
        memcpy(name, url->str+url->file_o+1, ext_offs);
        strcpy(name+ext_offs, ext);
    }

    /* if the file already exists, append "-1", "-2", ... until free */
    x=0;
    if (stat(name, &st) == 0) {
        do {
            x++;
            sz = sprintf(name+ext_offs, "-%d%s", x, ext);
        } while (stat(name, &st) == 0);
    }

    r = lm_io_save(h, url, name);

    if (r == M_OK) {
        /* set the I/O buffer to the name of the file */
        free(h->buf.ptr);
        h->buf.ptr = name;
        h->buf.sz = strlen(name);
        h->buf.cap = a_sz;
    } else
        free(name);

    free(ext);
    return M_OK;
}
/**
 * Parse the given string as CSS and add the found URLs to
 * the uehandle.
 *
 * Scans for "url" tokens; after "url(", the terminator searched for
 * depends on the quoting style ("), ') or bare ")". Outside
 * parentheses it falls back to quote or ';' terminators.
 *
 * NOTE(review): the isspace()/deref loops after a match are not
 * bounded by 'e' — a buffer ending exactly in "url" would read past
 * the end unless the input is NUL-terminated; confirm with callers.
 **/
M_CODE
lm_extract_css_urls(uehandle_t *ue_h, char *p, size_t sz)
{
    char *e = p+sz;
    char *t, *s;

    while ((p = memmem(p, e-p, "url", 3))) {
        p += 3;
        while (isspace(*p)) p++;
        if (*p == '(') {
            do p++; while (isspace(*p));
            /* choose closing token to match the opening quote, if any */
            t = (*p == '"' ? "\")"
                    : (*p == '\'' ? "')" : ")"));
            if (*t != ')')
                p++;
        } else
            t = (*p == '"' ? "\""
                    : (*p == '\'' ? "'" : ";"));

        /* no terminator before end of buffer: skip this candidate */
        if (!(s = memmem(p, e-p, t, strlen(t))))
            continue;

        ue_add(ue_h, p, s-p);
        p = s;
    }

    return M_OK;
}
/**
 * Default plaintext parser.
 *
 * Thin wrapper: extracts absolute http/ftp URLs from the raw buffer
 * and queues them on the URL-engine handle. 'w', 'url' and 'al' are
 * unused here but required by the parser callback signature.
 **/
M_CODE
lm_parser_text(worker_t *w, iobuf_t *buf,
               uehandle_t *ue_h, url_t *url,
               attr_list_t *al)
{
    return lm_extract_text_urls(ue_h, buf->ptr, buf->sz);
}
/**
 * Extract absolute http:// and ftp:// URLs from a plaintext buffer
 * and queue them on the URL-engine handle.
 *
 * Bug fix: the room-for-scheme check used to compare 'p - e' (always
 * negative, since p < e) against the scheme length, so no URL could
 * ever match. It must measure the distance from the *start* of the
 * buffer instead.
 *
 * NOTE(review): strstr() assumes the buffer is NUL-terminated within
 * sz bytes — confirm with callers.
 **/
M_CODE
lm_extract_text_urls(uehandle_t *ue_h, char *p, size_t sz)
{
    int x;
    char *start = p;
    char *s, *e = p+sz;

    for (p = strstr(p, "://"); p && p<e; p = strstr(p+1, "://")) {
        for (x=0;x<2;x++) {
            /* enough bytes before "://" for this scheme name? */
            if (p-start >= protocols[x].len
                && strncmp(p-protocols[x].len, protocols[x].name, protocols[x].len) == 0) {
                /* URL body: alnum and a small set of URL punctuation */
                for (s=p+3; s < e; s++) {
                    if (!isalnum((unsigned char)*s) && *s != '%' && *s != '?'
                        && *s != '=' && *s != '&' && *s != '/'
                        && *s != '.') {
                        ue_add(ue_h, p-protocols[x].len, (s-p)+protocols[x].len);
                        break;
                    }
                }
                p = s;
            }
        }
    }

    return M_OK;
}
/**
 * Default FTP parser. Expects data returned from the default
 * FTP handler.
 *
 * Splits the buffer into '\n'-terminated lines and runs each through
 * ftpparse(); directories (flagtrycwd) get a trailing '/' appended so
 * the crawler descends into them.
 *
 * NOTE(review): the 'continue' on over-long names skips the
 * 'prev = p+1' update, so the next parse window starts at the stale
 * line boundary — presumably ftpparse just rejects the merged span,
 * but confirm. A final line without a trailing '\n' is ignored.
 **/
M_CODE
lm_parser_ftp(worker_t *w, iobuf_t *buf,
              uehandle_t *ue_h, url_t *url,
              attr_list_t *al)
{
    char *p, *prev;
    struct ftpparse info;
    char name[128]; /* 127 chars + NUL; names >= 126 are rejected below
                     * to leave room for the appended '/' and NUL */
    int len;

    for (prev = p = buf->ptr; p<buf->ptr+buf->sz; p++) {
        if (*p == '\n') {
            if (p-prev) {
                if (ftpparse(&info, prev, p-prev)) {
                    if (info.namelen >= 126) {
                        LM_WARNING(w->m, "file name too long");
                        continue;
                    }
                    if (info.flagtrycwd) {
                        /* directory: append '/' so it is crawled */
                        memcpy(name, info.name, info.namelen);
                        name[info.namelen] = '/';
                        name[info.namelen+1] = '\0';
                        len = info.namelen+1;
                    } else {
                        /* plain file; length passed explicitly, so no
                         * NUL terminator is required */
                        strncpy(name, info.name, info.namelen);
                        len = info.namelen;
                    }
                    ue_add(ue_h, name, len);
                }
                prev = p+1;
            } else
                prev = p+1;
        }
    }

    return M_OK;
}
| nicholaides/Methanol-Web-Crawler | src/libmetha/builtin.c | C | isc | 6,361 |
/* Copyright (c) 2019, 2022 Dennis Wölfing
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* libc/src/stdio/__file_write.c
* Write data to a file. (called from C89)
*/
#define write __write
#include <unistd.h>
#include "FILE.h"
/* Write 'size' bytes from 'p' to the file descriptor backing 'file',
 * retrying on short writes. Sets FILE_FLAG_ERROR and stops early if
 * write() fails. Returns the number of bytes actually written. */
size_t __file_write(FILE* file, const unsigned char* p, size_t size) {
    size_t remaining = size;
    while (remaining > 0) {
        ssize_t result = write(file->fd, p, remaining);
        if (result < 0) {
            file->flags |= FILE_FLAG_ERROR;
            break;
        }
        remaining -= (size_t) result;
        p += result;
    }
    return size - remaining;
}
| dennis95/dennix | libc/src/stdio/__file_write.c | C | isc | 1,293 |
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <dlfcn.h>
#include <unistd.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "format.h"
#include <syslog.h>
/*static int read_efi_label(nvlist_t *config, diskaddr_t *sb);*/
#if defined(__i386) || defined(__amd64)
#define BOOTCMD "installgrub(1M)"
#else
#define BOOTCMD "installboot(1M)"
#endif
#define DISK_ROOT "/dev"
#define RDISK_ROOT "/dev"
#define BACKUP_SLICE "s2"
/*
* ====================================================================
* zpool property functions
* ====================================================================
*/
/*
 * Fetch all pool properties from the kernel via ZFS_IOC_POOL_GET_PROPS
 * and store the resulting nvlist in zhp->zpool_props.
 * Returns 0 on success, -1 on failure.
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	/*
	 * The kernel reports ENOMEM when the destination buffer is too
	 * small for the packed nvlist; grow it and retry until the
	 * ioctl succeeds or fails for a different reason.
	 */
	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
/*
 * Re-fetch the pool's property nvlist from the kernel, freeing the
 * previously cached copy only after the refresh succeeds (so the old
 * cache survives a failed refresh). Returns 0 on success, -1 on error.
 */
static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *prev = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(prev);
	return (0);
}
/*
 * Look up a string-valued pool property in the cached props nvlist.
 * Falls back to the property's default (or "-" when there is none)
 * if the kernel did not report a value. Optionally returns the value
 * source (local vs default) via 'src'. The returned pointer aliases
 * either the cached nvlist or static storage — caller must not free.
 */
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		/* property present: source and value are both recorded */
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Look up a numeric pool property. If the props cache is empty and
 * cannot be populated (typically a faulted pool), fall back to the
 * config nvlist for ZPOOL_PROP_GUID, else to the property default.
 * Optionally returns the value source via 'src'.
 */
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		/* 'value' is reused: first the source, then the value */
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Map a vdev state (plus auxiliary reason for CANT_OPEN) to the
 * localized display string used by zpool status output.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	if (state == VDEV_STATE_CLOSED || state == VDEV_STATE_OFFLINE)
		return (gettext("OFFLINE"));
	if (state == VDEV_STATE_REMOVED)
		return (gettext("REMOVED"));
	if (state == VDEV_STATE_CANT_OPEN) {
		/* the aux code distinguishes why the vdev can't open */
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		return (gettext("UNAVAIL"));
	}
	if (state == VDEV_STATE_FAULTED)
		return (gettext("FAULTED"));
	if (state == VDEV_STATE_DEGRADED)
		return (gettext("DEGRADED"));
	if (state == VDEV_STATE_HEALTHY)
		return (gettext("ONLINE"));

	return (gettext("UNKNOWN"));
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 *
 * Handles unavailable pools specially (only NAME/HEALTH/GUID and the
 * cached ALTROOT/CACHEFILE are reportable), then formats the value
 * according to the property's type. Optionally reports the value
 * source via 'srctype'. Returns 0 on success, -1 on failure.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		/* pool can't be opened: synthesize what we can */
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (long long unsigned int)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			/* these may still be cached locally */
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				if (srctype != NULL)
					*srctype = src;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		/* some numeric properties get special formatting */
		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
			(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			/* stored as a ratio scaled by 100 */
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			/* derive the display state from the vdev stats */
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t) intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	size_t poollen = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	/* bootfs must be the pool itself or a dataset inside it */
	if (strncmp(pool, bootfs, poollen) != 0)
		return (B_FALSE);
	if (bootfs[poollen] != '/' && bootfs[poollen] != '\0')
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label. Recurses through the vdev tree; a leaf (no children)
 * is checked by attempting to read its EFI label.
 */
/* ZFSFUSE: disabled — compiled out below; kept for reference */
#if 0
static boolean_t
pool_uses_efi(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
	return (B_FALSE);
}
#endif
/*
 * A pool is bootable when its 'bootfs' property is set to anything
 * other than the "-" placeholder (and can be read at all).
 */
static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) != 0)
		return (B_FALSE);

	return (strncmp(bootfs, "-", sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 *
 * Returns a freshly allocated nvlist of parsed properties on success,
 * or NULL (with the error already reported via hdl) on failure.
 * 'create_or_import' gates properties that may only be set at pool
 * creation/import (ALTROOT) or only afterwards (BOOTFS).
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		/* parse string forms of numeric/index values into retprops */
		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			/* can only upgrade within the supported range */
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			/* ZFSFUSE: disabled */
			/*if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}*/
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			/* empty string and "none" are accepted as-is */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			/* temporarily truncate to verify the parent dir */
			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property : propname=propval.
 *
 * Validates/parses the property through zpool_valid_proplist(), then
 * issues ZFS_IOC_POOL_SET_PROPS. On success the local property cache
 * is refreshed. Returns 0 on success, -1 (error reported) on failure.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	/* validation returns a new nvlist with parsed values */
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
/*
 * Expand the property list for the pool and compute the display width
 * needed for each non-fixed property's value.  Returns 0 on success,
 * -1 on failure to expand the list.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		/* Fixed-width entries never grow. */
		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			/* Compute the length once instead of twice. */
			size_t len = strlen(buf);

			if (len > entry->pl_width)
				entry->pl_width = len;
		}
	}

	return (0);
}
/*
* Don't start the slice at the default block of 34; many storage
* devices will use a stripe width of 128k, so start there instead.
*/
#define NEW_START_BLOCK 256
/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.  Returns B_TRUE if the name is acceptable, B_FALSE otherwise (with
 * an aux error message set on 'hdl' when it is non-NULL).
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			/* Map the namecheck failure to a human message. */
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			default:
				/*
				 * Previously, unlisted namecheck errors
				 * produced no aux message at all; emit a
				 * generic one so the caller's error output
				 * is never silent.
				 */
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid pool name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.  Returns NULL (with libzfs error state set) if the name is invalid,
 * allocation fails, stats cannot be refreshed, or the pool does not exist.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t not_found;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
	if (zhp == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	/* Pull in the current config/stats; bail if that fails outright. */
	if (zpool_refresh_stats(zhp, &not_found) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	/* The pool name was valid but no such pool is known. */
	if (not_found) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).  On success *ret holds the new
 * handle, or NULL if the pool has since disappeared; returns 0.  Returns -1
 * only on allocation or refresh failure.
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t not_found;

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
	if (zhp == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &not_found) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	/* Pool vanished from the cache: not an error, just report NULL. */
	if (not_found) {
		zpool_close(zhp);
		*ret = NULL;
	} else {
		*ret = zhp;
	}

	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.  Returns NULL with EZFS_POOLUNAVAIL if the pool is unavailable.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp = zpool_open_canfail(hdl, pool);

	if (zhp == NULL)
		return (NULL);

	if (zhp->zpool_state != POOL_STATE_UNAVAIL)
		return (zhp);

	/* Pool exists but is faulted: refuse to hand out the handle. */
	(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
	    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
	zpool_close(zhp);
	return (NULL);
}
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	/*
	 * nvlist_free() is documented to be a no-op on a NULL argument,
	 * so the previous "if (x) nvlist_free(x)" guards were redundant.
	 */
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.  The string is owned by the handle and
 * remains valid until zpool_close().
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	const char *name = zhp->zpool_name;

	return (name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE), as cached on
 * the handle.
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	int state = zhp->zpool_state;

	return (state);
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 *
 * 'props' (pool properties) and 'fsprops' (root filesystem properties) may
 * each be NULL.  Returns 0 on success, non-zero (with libzfs error state
 * set) on failure.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;	/* validated root-fs properties */
	nvlist_t *zc_props = NULL;	/* validated pool properties */
	char msg[1024];
	char *altroot;
	int ret = -1;
	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);
	/* Reject reserved and malformed pool names up front. */
	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	/* Validate pool-level properties, if any were supplied. */
	if (props) {
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
	}
	if (fsprops) {
		uint64_t zoned;
		char *zonestr;
		/* 'zoned' affects how the fs property list is validated. */
		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);
		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		/* Nest the root-fs props inside the pool props for the ioctl. */
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}
	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;
	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
		/* This error path frees everything before each return. */
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));
		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];
				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));
		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));
		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));
		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}
	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;
		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);
		zfs_close(zhp);
	}
create_failed:
	/* Common cleanup: also reached on success with ret == 0. */
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 *
 * For an active pool the root filesystem handle is opened first so its
 * mountpoint can be removed after the pool is gone.  Returns 0 on success,
 * -1 on failure (with libzfs error state set).
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	/* Only an active pool has a root dataset to open. */
	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		/* errno from the failed ioctl is consumed below. */
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);
		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}
		if (zfp)
			zfs_close(zfp);
		return (-1);
	}
	/* Pool is gone; clean up the stale mountpoint of the root fs. */
	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}
	return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 *
 * Spares and L2 cache devices require minimum pool versions; both are
 * checked before the ioctl is issued.  Returns 0 on success, -1 on failure.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);
	/* Hot spares require at least SPA_VERSION_SPARES. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}
	/* ZFSFUSE: EFI-label check on bootable pools is disabled. */
#if 0
	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;
		for (s = 0; s < nspares; s++) {
			char *path;
			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}
#endif
	/* L2 cache devices require at least SPA_VERSION_L2CACHE. */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}
	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;
		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];
				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;
		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;
		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;
		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;
		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}
		ret = -1;
	} else {
		ret = 0;
	}
	zcmd_free_nvlists(&zc);
	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 *
 * ZFSFUSE: the ioctl is retried on EBUSY, with a .5s sleep between
 * attempts, because zfs-fuse can keep datasets busy briefly after the
 * umounts have returned.  Returns 0 on success, non-zero on failure.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

#define ZFSFUSE_BUSY_SLEEP_FACTOR 500000 /* usec; .5 seconds was chosen after some tuning */
	int retry = 0;
	int ret;
	while ((ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc)) == EBUSY
		&& retry++ < 6) {
		struct timeval timeout;
		/* Something in the way zfs-fuse works keeps the datasets busy for
		 * longer than expected.
		 * If we try to export/destroy a pool containing a few fs like
		 * pool/fs1/fs2, then it will try to export it much before the umounts
		 * are really finished.
		 * The sleep is a temporary workaround here.
		 * The zfsfuse_destroy function is called after umount has already
		 * returned, so the only solution is to allow a pause here in case the
		 * export fails with EBUSY */
		timeout.tv_sec=0;
		timeout.tv_usec=ZFSFUSE_BUSY_SLEEP_FACTOR;
		VERIFY(select(0,NULL,NULL,NULL,&timeout)==0);
	}
	if (retry>0)
		/*
		 * BUGFIX: the elapsed time was divided by 100000.0, reporting
		 * ten times the real duration; a microsecond count must be
		 * divided by 1000000.0 to yield seconds.
		 */
		syslog(LOG_WARNING, "Pool '%s' was busy, export was tried for %0.1fs (%i attempts) resulting in %s",
			zhp->zpool_name, (retry*ZFSFUSE_BUSY_SLEEP_FACTOR)/1000000.0, retry, strerror(errno));

	if (ret != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
/* Export the pool without forcing a hard export. */
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
	int rc = zpool_export_common(zhp, force, B_FALSE);

	return (rc);
}
/* Forcibly (and hard-forcibly) export the pool. */
int
zpool_export_force(zpool_handle_t *zhp)
{
	int rc = zpool_export_common(zhp, B_TRUE, B_TRUE);

	return (rc);
}
/*
 * Print a human-readable summary of a (possibly dry-run) pool rewind:
 * which timestamp the pool was (or would be) returned to and roughly how
 * much transaction history is discarded.  Silent if error printing is
 * disabled or 'rbi' carries no load-time information.
 */
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *rbi)
{
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];
	time_t when;

	if (!hdl->libzfs_printerr || rbi == NULL)
		return;
	if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss);

	/*
	 * Convert through a real time_t instead of the previous
	 * (time_t *)&rewindto cast, which misreads the value (and
	 * violates strict aliasing) wherever time_t is not a 64-bit
	 * integer type.
	 */
	when = (time_t)rewindto;
	if (localtime_r(&when, &t) != NULL &&
	    strftime(timestr, 128, "%F", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		/* Report the loss in minutes when over two minutes. */
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately " FI64 " "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately " FI64 " "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
/*
 * Explain to the user how a damaged pool might be recovered by rewinding
 * ('zpool clear -F' or 'zpool import -F'), including the rewind target
 * time, the amount of data lost and the count of persistent errors.
 * 'reason' >= 0 prefixes the output with "action: "; silent when error
 * printing is disabled.
 */
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];
	time_t when;

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_uint64(config,
	    ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	/*
	 * Convert through a real time_t instead of the previous
	 * (time_t *)&rewindto cast, which misreads the value (and
	 * violates strict aliasing) wherever time_t is not a 64-bit
	 * integer type.
	 */
	when = (time_t)rewindto;
	if (localtime_r(&when, &t) != NULL &&
	    strftime(timestr, 128, "%F", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	/* Report the loss in minutes when over two minutes. */
	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately " FI64 " minutes of data\n"
		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately " FI64 " seconds of data\n"
		    "\tmust be discarded, irreversibly.  "), loss);
	}

	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 *
 * When 'altroot' is given, the pool is imported with that alternate root
 * and with the cachefile property forced to "none".
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
	/* nvlist_free() is a no-op on NULL, so no guard is needed. */
	nvlist_free(props);
	return (ret);
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameters control whether the pool
 * is imported with a different name.
 *
 * Returns 0 on success, -1 on failure.  All exit paths after the zc
 * buffers are allocated now flow through common cleanup: previously the
 * success path and the try-rewind failure path returned early, leaking
 * the zc nvlist buffers and the validated property list.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nvi = NULL;
	char *thename;
	char *origname;
	uint64_t returned_size;
	int ret;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		/* 'props' now points at a validated copy that we own. */
		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	returned_size = zc.zc_nvlist_conf_size + 512;
	if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = (uint64_t)importfaulted;
	ret = 0;
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		char desc[1024];

		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
		zpool_get_rewind_policy(config, &policy);
		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nvi);
			nvlist_free(nvi);
			ret = -1;
			goto out;
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		default:
			/*
			 * 'nvi' was already read above; the previous second
			 * zcmd_read_dst_nvlist() call here leaked the first
			 * list.
			 */
			(void) zpool_standard_error(hdl, errno, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -errno, nvi);
			break;
		}
		/* Free 'nvi' once for every errno case. */
		nvlist_free(nvi);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
		zpool_get_rewind_policy(config, &policy);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
		}
		nvlist_free(nvi);
	}

out:
	zcmd_free_nvlists(&zc);
	nvlist_free(props);
	return (ret);
}
/*
 * Scan the pool.
 *
 * 'func' selects the operation (e.g. POOL_SCAN_SCRUB to start a scrub,
 * POOL_SCAN_NONE to cancel one).  Returns 0 on success, or a libzfs
 * error: EZFS_SCRUBBING/EZFS_RESILVERING when a scan is already running,
 * EZFS_NO_SCRUB when there is nothing to cancel.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;
	/* ENOENT is treated as success except when cancelling. */
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);
	/* Build the error prefix appropriate to the requested operation. */
	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}
	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;
		/* Distinguish an active scrub from an active resilver. */
		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Find a vdev that matches the search criteria specified. We use the
 * the nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if its an INUSE spare.
 *
 * The function recurses depth-first through ZPOOL_CONFIG_CHILDREN, then
 * through the spare and l2cache device arrays, setting *avail_spare or
 * *l2cache when the match is found there.  *log is set (from the root
 * invocation only) when the matching child is a log top-level vdev.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);
	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);
	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64: {
		uint64_t srchval, theguid, present;
		verify(nvpair_value_uint64(pair, &srchval) == 0);
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
			    &present) == 0) {
				/*
				 * If the device has never been present since
				 * import, the only reliable way to match the
				 * vdev is by GUID.
				 */
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_GUID, &theguid) == 0);
				if (theguid == srchval)
					return (nv);
			}
		}
		break;
	}
	case DATA_TYPE_STRING: {
		char *srchval, *val;
		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;
		/*
		 * Search for the requested value. We special case the search
		 * for ZPOOL_CONFIG_PATH when it's a wholedisk and when
		 * Looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
			uint64_t wholedisk = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				/*
				 * For whole disks, the internal path has 's0',
				 * but the path passed in by the user doesn't.
				 */
				if (strlen(srchval) == strlen(val) - 2 &&
				    strncmp(srchval, val, strlen(srchval)) == 0)
					return (nv);
				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;
			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);
			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';
			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}
			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			/* errno distinguishes strtoull() failure from 0. */
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);
			free(type);
			if (errno != 0)
				return (NULL);
			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}
		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}
	default:
		break;
	}
	/* No direct match: descend into the child vdevs. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);
	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}
	/* Check the spare devices, flagging the match as a spare. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}
	/* Check the l2cache devices, flagging the match as l2cache. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}
	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.  Out parameters report whether the match was an
 * available spare, an l2cache device, or (if 'log' is non-NULL) a log
 * vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	/*
	 * Initialize all out parameters, matching zpool_find_vdev();
	 * previously *l2cache and *log were left untouched, so callers
	 * could observe stale values when no l2cache/log match occurred.
	 */
	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
 * A name is interior when it begins with the raidz or mirror type prefix.
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
/*
 * Locate a vdev within the pool's config tree.  'path' may be a numeric
 * guid, an interior vdev name (mirror-N/raidz-N), a device name (prefixed
 * with /dev/dsk/), or an absolute path.  Out parameters report whether
 * the match was a spare, an l2cache device, or a log vdev.
 */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char pathbuf[MAXPATHLEN];
	char *endp;
	nvlist_t *nvroot, *query, *match;
	uint64_t guid;

	verify(nvlist_alloc(&query, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/* Decide how to interpret 'path' and build the search key. */
	guid = strtoull(path, &endp, 10);
	if (guid != 0 && *endp == '\0') {
		/* Pure number: match by vdev guid. */
		verify(nvlist_add_uint64(query, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		/* mirror-N / raidz-N: match by top-level vdev type. */
		verify(nvlist_add_string(query, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		/* Bare device name: expand to the /dev/dsk path. */
		(void) snprintf(pathbuf, sizeof (pathbuf), "%s%s",
		    "/dev/dsk/", path);
		verify(nvlist_add_string(query, ZPOOL_CONFIG_PATH,
		    pathbuf) == 0);
	} else {
		verify(nvlist_add_string(query, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	/* Clear all out parameters before the search. */
	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;

	match = vdev_to_nvlist_iter(nvroot, query, avail_spare, l2cache, log);
	nvlist_free(query);

	return (match);
}
/*
 * Return 1 if the vdev is online, 0 otherwise.  A vdev carrying any of
 * the offline/faulted/removed config flags is considered not online.
 */
static int
vdev_online(nvlist_t *nv)
{
	uint64_t flag;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &flag) == 0)
		return (0);
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &flag) == 0)
		return (0);
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &flag) == 0)
		return (0);

	return (1);
}
/*
 * Helper function for zpool_get_physpaths().
 *
 * Appends the vdev's phys_path to 'physpath' (space-separated after the
 * first entry), advancing *bytes_written.  Returns 0 on success,
 * EZFS_NODEVICE when the vdev has no phys_path, EZFS_NOSPC on
 * truncation or formatting failure.
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos;
	int rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	if (rsz < 0) {
		/*
		 * snprintf() failed; previously the negative return was
		 * stored in a size_t and added to *bytes_written,
		 * corrupting the running offset.  Treat it like
		 * truncation instead.
		 */
		if (bytes_left != 0)
			physpath[pos] = 0;
		return (EZFS_NOSPC);
	}
	*bytes_written += rsz;

	if ((size_t)rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
/*
 * Recursively collect phys_paths for the online disks under 'nv' into
 * 'physpath', accumulating the written length in *rsz.
 *
 * Note: the function always falls through to return EZFS_POOL_INVALARG
 * (except on EZFS_NOSPC or a disk-level error); the caller
 * zpool_get_config_physpath() ignores the return value and checks *rsz
 * instead.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);
	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}
		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    /* Deliberate assignment: children of a spare vdev inherit it. */
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;
		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);
		/* Only out-of-space errors abort the recursion early. */
		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}
	return (EZFS_POOL_INVALARG);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 *
 * Walks the first top-level vdev and concatenates the phys_paths of its
 * online disks into 'physpath'.  Success is judged by the number of
 * bytes written (rsz), not by vdev_get_physpaths()'s return value,
 * which is deliberately ignored.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;
	rsz = 0;
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);
	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);
	/*
	 * root pool can not have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	/* ZFSFUSE: the root-pool restrictions are disabled. */
#if 0
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);
#endif
	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);
	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);
	return (0);
}
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	nvlist_t *config = zhp->zpool_config;

	return (zpool_get_config_physpath(config, physpath, phypath_size));
}
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 *
 * Returns 0 on success (including the zfs-fuse best-effort stub case),
 * -1 if the EFI library entry point is unavailable, or a zfs_error()
 * code if the device cannot be opened.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
	char path[MAXPATHLEN];
	char errbuf[1024];
	int fd;
	int (*_efi_use_whole_disk)(int);

	/* Resolve the EFI helper lazily; bail out if libefi is absent. */
	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	/*
	 * Bug fix: 'errbuf' was previously passed to zfs_error() without
	 * ever being initialized, producing a garbage error prefix.
	 */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}
	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 */
	/* zfs-fuse : no efi function here, this should be fixed later if
	 * possible...
	 * error = _efi_use_whole_disk(fd); */
	(void) close(fd);
	/* if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	} */
	return (0);
}
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.  On success, *newstate receives the state the kernel
 * actually moved the vdev to.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	/* Pick the error prefix up front; 'msg' is reused by all error paths. */
	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	/* Inactive hot spares cannot be onlined directly. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));
	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;
		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);
		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}
		if (wholedisk) {
			/*
			 * NOTE(review): this assumes 'pathname' starts with
			 * DISK_ROOT "/"; if it does not, the pointer is
			 * advanced past unrelated bytes -- confirm callers.
			 */
			pathname += strlen(DISK_ROOT) + 1;
			/* Relabel failure is deliberately ignored (best effort). */
			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
		}
	}
	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			/* The vdev belongs to the other half of a split pool. */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}
	/* The kernel returns the resulting vdev state in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}
/*
 * Take the specified vdev offline.  When 'istmp' is set the offline state
 * does not persist across pool export/import.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	char msg[1024];
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL);
	if (tgt == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* An inactive hot spare cannot be taken offline. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	if (errno == EBUSY) {
		/* There are no other replicas of this device. */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
	}
	if (errno == EEXIST) {
		/* The log device has unplayed logs. */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
	}
	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Mark the vdev identified by 'guid' as faulted, recording 'aux' as the
 * reason.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t) guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	if (errno == EBUSY) {
		/* There are no other replicas of this device. */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
	}
	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Mark the vdev identified by 'guid' as degraded, recording 'aux' as the
 * reason.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t) guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in
 * as a hot spare: i.e. somewhere in the tree there is a two-child "spare"
 * vdev whose child at index 'which' is 'tgt'.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	/* Leaf vdevs have no children and therefore cannot match. */
	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return (B_FALSE);

	verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
	    &type) == 0);
	if (strcmp(type, VDEV_TYPE_SPARE) == 0 && children == 2 &&
	    child[which] == tgt)
		return (B_TRUE);

	/* Recurse into every child subtree. */
	for (c = 0; c < children; c++) {
		if (is_replacing_spare(child[c], tgt, which))
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 * Returns 0 on success, -1 after reporting an error via zfs_error().
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *path, *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);
	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);
	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.  (Check disabled in zfs-fuse.)
	 */
#if 0
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}
#endif
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* Resolve the device being attached to / replaced. */
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));
	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;
	/* The caller-supplied nvroot must describe exactly one new device. */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}
	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);
	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}
	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}
	free(newname);
	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
	zcmd_free_nvlists(&zc);
	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),
			    BOOTCMD, new_disk);
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}
	/* The ioctl failed; translate errno into a user-facing diagnostic. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;
	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;
	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;
	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;
	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;
	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;
	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}
	return (-1);
}
/*
 * Detach the specified device (only valid for mirror and replacing vdevs).
 * Returns 0 on success, -1 after reporting an error.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	char msg[1024];
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL);
	if (tgt == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Spares and cache devices are removed, not detached. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));
	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	if (errno == ENOTSUP) {
		/* Can't detach from this type of vdev. */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
	} else if (errno == EBUSY) {
		/* There are no other replicas of this device. */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
	} else {
		(void) zpool_standard_error(hdl, errno, msg);
	}
	return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc, sc;

	for (mc = 0; mc < mchildren; mc++) {
		char *mpath;
		int match = -1;

		mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, mchild[mc],
		    B_FALSE);
		for (sc = 0; sc < schildren && match < 0; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			if (strcmp(mpath, spath) == 0)
				match = (int)mc;
			free(spath);
		}
		free(mpath);
		if (match >= 0)
			return (match);
	}
	return (-1);
}
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 *
 * Returns 0 on success; on failure returns -1 or a zfs_error() code and
 * releases any state built up along the way via the 'out' cleanup path.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	/* memory_err stays set until a path proves it was not an OOM exit. */
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;
	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
	/* The new pool must itself have a valid name. */
	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
	if (props) {
		/* Validate requested properties against this pool version. */
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, B_TRUE, msg)) == NULL)
			return (-1);
	}
	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		if (zc_props)
			nvlist_free(zc_props);
		return (-1);
	}
	/* One slot per top-level vdev of the source pool. */
	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;
	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;
	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;
		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			/* Track where a trailing run of holes/logs begins. */
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}
		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}
		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}
	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}
	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		/* We own *newroot now; free it if we fail before handing off. */
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}
	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;
	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}
	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;
	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;
	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;
	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}
	freelist = B_FALSE;
	memory_err = B_FALSE;
out:
	if (varray != NULL) {
		int v;
		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	if (zc_props)
		nvlist_free(zc_props);
	if (newconfig)
		nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}
	if (retval != 0)
		return (retval);
	if (memory_err)
		return (no_memory(hdl));
	return (0);
}
/*
 * Remove the given device.  Currently, this is supported only for hot spares,
 * level 2 cache devices and (pool version permitting) log devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		/* Bug fix: message typo "must be upgrade" -> "upgraded". */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 * 'rewindnvl' carries the rewind policy; when a (try-)rewind is requested
 * the kernel's rewind report is read back and displayed.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));
		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0)
		return (-1);

	/*
	 * Bug fix: on failure, release the dst nvlist buffer allocated just
	 * above; the previous code returned without freeing it.
	 */
	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 ||
	    ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			/* Show the kernel's rewind outcome to the user. */
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (longlong_t) guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Convert from a devid string to a path.  Returns a malloc'd path that the
 * caller must free(), or NULL on failure.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * Bug fix: duplicate the name first, then release the list
	 * unconditionally.  The previous code returned early when strdup()
	 * failed and leaked 'list'.
	 */
	path = strdup(list[0].devname);
	devid_free_nmlist(list);

	return (path);
}
/*
 * Convert from a path to a devid string.  Returns an encoded devid string
 * (caller frees with devid_str_free()), or NULL on failure.
 */
static char *
path_to_devid(const char *path)
{
	ddi_devid_t devid;
	char *minor = NULL;
	char *ret = NULL;
	int fd;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev. We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	/*
	 * Use strlcpy (the convention elsewhere in this file) so the
	 * fixed-size ioctl buffers are always NUL-terminated; strncpy
	 * does not guarantee termination on truncation.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 *
 * Returns a string allocated with zfs_strdup(); the caller must free() it.
 */
/*
 * zfs-fuse FIXME: Handle this properly
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		/* Missing device: display its guid instead of a path. */
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it. Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);
			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;
				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}
			if (newdevid)
				devid_str_free(newdevid);
		}
		if (strncmp(path, "/dev/", 5) == 0)
			path += 5;
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
		char str[64];
		/*
		 * Bug fix: bound the copy.  The previous unbounded strcpy()
		 * could overflow 'str' if the type string ever exceeded
		 * 63 characters.
		 */
		(void) strlcpy(str, path, sizeof (str));
		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", str,
			    (u_longlong_t)id);
			path = buf;
		}
	}
	return (zfs_strdup(hdl, path));
}
static int
zbookmark_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.  On success *nverrlistp holds one nvlist per distinct
 * (dataset, object) pair; the caller owns it.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				/* Kernel has more errors now; grow and retry. */
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;
	/* Base of the kernel buffer; zb points into this same allocation. */
	void *nvlist_dst = (void *)(uintptr_t) zc.zc_nvlist_dst;
	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free(nvlist_dst);
	return (0);

nomem:
	/*
	 * Bug fix: 'nvlist_dst' and zc.zc_nvlist_dst are the SAME
	 * allocation; the previous code freed both pointers here, a
	 * double free.  Free the buffer exactly once.
	 */
	free(nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { 0 };

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) == 0)
		return (0);

	return (zpool_standard_error_fmt(hdl, errno,
	    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
	    zhp->zpool_name));
}
/*
 * Build the history string "subcommand arg1 arg2 ..." into 'history_str',
 * dropping trailing arguments that would not fit in HIS_MAX_RECORD_LEN.
 */
void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		size_t used = strlen(history_str);

		/* Stop once the next " arg" no longer fits. */
		if (used + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}
/*
 * Stage command history for logging.
 *
 * Returns 0 on success, EINVAL for a NULL or oversized string, or
 * no_memory() if the copy cannot be allocated.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	/* free(NULL) is a no-op, so no guard is needed. */
	free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		/* Map the common errnos onto specific user-facing errors. */
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}
	/* Tell the caller how much was read and where to resume. */
	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;
	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 *
 * Returns 0 on success or ENOMEM if a record cannot be unpacked or the
 * records array cannot be grown.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {
		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		/* Stop at a partial trailing record; caller re-reads it. */
		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array (capacity doubles at powers of 2) */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			/*
			 * Bug fix: check the realloc() result instead of
			 * overwriting *records directly.  On failure the old
			 * array stayed live but the code below would have
			 * dereferenced NULL (and the array leaked).
			 */
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
#define HIS_BUF_LEN (128*1024)
/*
* Retrieve the command history of a pool.
*/
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	/* Read the history in HIS_BUF_LEN windows until EOF or error. */
	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		/* rewind past the trailing partial record for the next read */
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	/* On success hand the records back as a single nvlist array. */
	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	/* The nvlist copies the records, so the originals are always freed. */
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
/*
 * Translate a (dataset obj, object) pair into a human-readable path,
 * written into 'pathname' (at most 'len' bytes).  Falls back to raw
 * object numbers when the dataset or object cannot be resolved.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", (u_longlong_t) obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t) dsobj, (u_longlong_t) obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			/* mounted: prefix with the mountpoint */
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			/* not mounted: qualify with the dataset name */
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (u_longlong_t) obj);
	}
	/* is_mounted() allocates mntpnt; free(NULL) is a no-op otherwise */
	free(mntpnt);
}
/*
* Read the EFI label from the config, if a label does not exist then
* pass back the error to the caller. If the caller has passed a non-NULL
* diskaddr argument then we set it to the starting address of the EFI
* partition.
*/
/* ZFS-FUSE: not implemented */
#if 0
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	/* The vdev's device path is required to locate the raw disk node. */
	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	/* Build the /dev/rdsk-style name from the path's basename. */
	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		/* Read the EFI label; partition 0 holds the data slice. */
		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}
/*
* determine where a partition starts on a disk in the current
* configuration
*/
/*
 * Walk the vdev tree looking for the first leaf whose partition start
 * can be determined.  Returns MAXOFFSET_T when none is found.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **kids;
	uint_t i, nkids;
	uint64_t wholedisk = 0;
	diskaddr_t start = MAXOFFSET_T;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &kids, &nkids) == 0) {
		/* interior vdev: first child with a known start wins */
		for (i = 0; i < nkids; i++) {
			diskaddr_t s = find_start_block(kids[i]);
			if (s != MAXOFFSET_T)
				return (s);
		}
		return (MAXOFFSET_T);
	}

	/* leaf vdev: only whole disks carry an EFI label we can read */
	if (nvlist_lookup_uint64(config,
	    ZPOOL_CONFIG_WHOLE_DISK, &wholedisk) != 0 || !wholedisk)
		return (MAXOFFSET_T);

	if (read_efi_label(config, &start) < 0)
		start = MAXOFFSET_T;
	return (start);
}
#endif
/*
* Label an individual disk. The name provided is the short name,
* stripped of any leading /dev path.
*/
/* ZFS-FUSE: not implemented */
#if 0
/*
 * Write an EFI label onto the named disk, reserving slice 8 and giving
 * the rest of the disk to slice 0 (tagged V_USR, named "zfs").
 * 'name' is the short device name without any leading /dev path.
 * Returns 0 on success or a libzfs error code.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		/* EFI labels and root pools don't mix. */
		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		/* Align the new slice with the rest of the pool; cache it. */
		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	/* Slice 0 spans everything except the reserved tail and lead-in. */
	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	/* Slice 8 holds the mandatory EFI reserved area. */
	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message dir-
		 * ecting the user to manually label the disk and give
		 * a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
/*
 * Recursively verify that a vdev subtree contains no vdev types that
 * cannot back a dump device.  Reports the offending type via zfs_error
 * and returns B_FALSE on the first unsupported vdev found.
 */
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	static const char *unsupported[] = {
		VDEV_TYPE_RAIDZ,
		VDEV_TYPE_FILE,
		VDEV_TYPE_LOG,
		VDEV_TYPE_HOLE,
		VDEV_TYPE_MISSING
	};
	char *type;
	nvlist_t **child;
	uint_t children, c;
	size_t t;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);

	for (t = 0; t < sizeof (unsupported) / sizeof (unsupported[0]); t++) {
		if (strcmp(type, unsupported[t]) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "vdev type '%s' is not supported"), type);
			(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
			return (B_FALSE);
		}
	}

	/* Leaf vdevs have no children; the lookup simply fails then. */
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}

	return (B_TRUE);
}
/*
* check if this zvol is allowable for use as a dump device; zero if
* it is, > 0 if it isn't, < 0 if it isn't a zvol
*/
/*
 * check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	/* Not a zvol device node at all. */
	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		/*
		 * Fixed: the original returned here (and below) without
		 * calling libzfs_fini(), leaking the handle.
		 */
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		/* Extract the pool name: everything before the first '/'. */
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for  '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	/* A dump device must be a single, plain top-level vdev. */
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}
#endif
| pscedu/slash2-stable | zfs-fuse/src/lib/libzfs/libzfs_pool.c | C | isc | 94,615 |
/* MIT License (From https://choosealicense.com/ )
Copyright (c) 2017 Jonathan Burget support@solarfusionsoftware.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.*/
#include "TigerEngine.h"
// Global variable so the objects are not lost
// Stack of TIGValues tracked by numbered (anonymous) stack frames.
static TIGValue *theNumberStack;
// Stack of TIGValues tracked by named stack frames.
static TIGValue *theStringStack;
// Current depth of anonymous frames; incremented/decremented by
// TIGStringStartStack(NULL)/TIGStringEndStack(NULL).
static int stackNumber;
// Name of the currently open named frame, or NULL when none is open.
static char *stackString;
// ************* For debugging Purposes *************
// Debug accessor: exposes the head of the numbered (anonymous) stack.
TIGValue *TIGStringNumberStack(void)
{
    return theNumberStack;
}
// Debug accessor: exposes the head of the named stack.
TIGValue *TIGStringStringStack(void)
{
    return theStringStack;
}
// ************* For debugging Purposes *************
// Opens a stack frame for subsequently created TIGStrings.  A NULL
// argument opens an anonymous (numbered) frame; a non-NULL argument
// opens a named frame whose name is copied into the module-static
// 'stackString'.
void TIGStringStartStack(const char *startStackString)
{
    // If there is another start stack called before the end stack free it
    if (stackString != NULL)
    {
        free(stackString);
        stackString = NULL;
    }

    if (startStackString == NULL)
    {
        stackNumber++;
    }
    else
    {
        stackString = (char *)malloc((strlen(startStackString) + 1) * sizeof(char));

        // Fixed: the original tested startStackString (already known to
        // be non-NULL here) instead of the malloc result, so an
        // allocation failure led to strcpy into a NULL pointer.
        if (stackString != NULL)
        {
            strcpy(stackString, startStackString);
        }
    }
}
// Closes a stack frame and destroys every stacked TIGString belonging
// to it.  A non-NULL argument closes the named frame with that name;
// NULL closes the innermost anonymous (numbered) frame.
void TIGStringEndStack(const char *endStackString)
{
    if (endStackString != NULL)
    {
        while (theStringStack != NULL)
        {
            TIGValue *theNextStack = theStringStack->nextStack;

            // 0 means both strings are the same
            if (strcmp(theStringStack->stackString, endStackString) == 0)
            {
                theStringStack = TIGStringDestroy(theStringStack);
            }

            // NOTE(review): non-matching entries are unlinked from the
            // stack here without being destroyed — confirm they are
            // owned elsewhere, otherwise they are no longer tracked.
            theStringStack = theNextStack;
        }
    }
    else
    {
        while (theNumberStack != NULL)
        {
            TIGValue *theNextStack = theNumberStack->nextStack;

            // Destroy only entries created at the current frame depth.
            if (theNumberStack->stackNumber == stackNumber)
            {
                theNumberStack = TIGStringDestroy(theNumberStack);
            }

            theNumberStack = theNextStack;
        }
    }

    // If there is another end or start stack string called before this end stack free it
    if (stackString != NULL)
    {
        free(stackString);
        stackString = NULL;
    }

    if (endStackString == NULL)
    {
        stackNumber--;
    }
}
// Allocates a fresh TIGValue of type "String".  When useStack is true
// the new value is pushed onto the currently open stack frame (named if
// 'stackString' is set, numbered otherwise) so TIGStringEndStack() can
// reclaim it.  The 'tigString' parameter is overwritten, not reused.
// Returns the new value, or NULL on allocation failure.
TIGValue *TIGStringCreate(TIGValue *tigString, TIGBool useStack)
{
    tigString = (TIGValue *)malloc(1 * sizeof(TIGValue));

    if (tigString == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringCreate() Variable:tigString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    if (useStack)
    {
        if (stackString != NULL)
        {
            // A named frame is open: link into theStringStack.
            if (theStringStack == NULL)
            {
                tigString->nextStack = NULL;
            }
            // Add the last added TIGString to the new tigString's ->nextStack
            else
            {
                tigString->nextStack = theStringStack;
            }

            // -1 marks the value as belonging to a named, not numbered, frame.
            tigString->stackNumber = -1;
            tigString->stackString = (char *)malloc((strlen(stackString) + 1) * sizeof(char));

            if (tigString->stackString != NULL)
            {
                strcpy(tigString->stackString, stackString);
            }
            else
            {
#ifdef TIG_DEBUG
                printf("ERROR Function:TIGStringCreate() Variable:tigString->stackString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
                assert(0);
#endif
#endif
            }

            // This adds the new tigString to the global TIGString stack
            theStringStack = tigString;
        }
        else
        {
            // No named frame: link into the numbered stack instead.
            if (theNumberStack == NULL)
            {
                tigString->nextStack = NULL;
            }
            // Add the last added TIGString to the new tigString's ->nextStack
            else
            {
                tigString->nextStack = theNumberStack;
            }

            // Tag with the current anonymous frame depth.
            tigString->stackNumber = stackNumber;
            tigString->stackString = NULL;

            // This adds the tigString to the global TIGString stack
            theNumberStack = tigString;
        }
    }
    else
    {
        // Unstacked value: caller owns it; -2 marks "not on any stack".
        tigString->nextStack = NULL;
        tigString->stackString = NULL;
        tigString->stackNumber = -2;
    }

    tigString->nextLevel = NULL;
    tigString->thisLevel = NULL;
    tigString->number = 0.0;

    // Sets the TIGObject's string to an empty string
    tigString->string = NULL;

    // object type
    tigString->type = "String";

    return tigString;
}
// Frees a TIGString and everything it owns.  Returns NULL on success so
// callers can write `p = TIGStringDestroy(p);`; a non-string value is
// returned untouched.
TIGValue *TIGStringDestroy(TIGValue *tigString)
{
    // Destroying NULL is a no-op (and the original returned NULL here too).
    if (tigString == NULL)
    {
        return NULL;
    }

    // Refuse to tear down a value of any other type.
    if (strcmp(tigString->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringDestroy() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return tigString;
    }

    // free(NULL) is a harmless no-op, so no guards are needed here.
    free(tigString->string);
    free(tigString->stackString);
    tigString->string = NULL;
    tigString->stackString = NULL;

    // Scrub the remaining fields before releasing the value itself.
    tigString->number = 0.0;
    tigString->stackNumber = 0;
    tigString->type = NULL;
    tigString->nextStack = NULL;
    tigString->nextLevel = NULL;
    tigString->thisLevel = NULL;

    free(tigString);
    return NULL;
}
// Shorthand for TIGStringInput(NULL, string): creates a new stacked
// TIGString holding a copy of 'string'.
TIGValue *TIGStr(const char *string)
{
    return TIGStringInput(NULL, string);
}
// Stores 'string' into tigString (creating it when NULL), always using
// the current stack frame for newly created values.
TIGValue *TIGStringInput(TIGValue *tigString, const char *string)
{
    return TIGStringStackInput(tigString, string, TIGYes);
}
// Copies 'string' into tigString, allocating a new TIGString (stacked
// or not, per useStack) when tigString is NULL.  Returns the string
// value, or NULL on bad arguments / allocation failure.
TIGValue *TIGStringStackInput(TIGValue *tigString, const char *string, TIGBool useStack)
{
    // Fixed: validate 'string' before use — the original called
    // strlen(string) for the malloc size before its NULL check, which is
    // undefined behavior on a NULL input.
    if (string == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringStackInput() Variable:string Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    if (tigString == NULL)
    {
        tigString = TIGStringCreate(tigString, useStack);

        if (tigString == NULL)
        {
#ifdef TIG_DEBUG
            printf("ERROR Function:TIGStringStackInput() Variable:tigString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
            assert(0);
#endif
#endif
            return NULL;
        }
    }
    else if (strcmp(tigString->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringStackInput() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    // If there is already a string free it
    if (tigString->string != NULL)
    {
        free(tigString->string);
        tigString->string = NULL;
    }

    tigString->string = (char *)malloc((strlen(string) + 1) * sizeof(char));

    if (tigString->string == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringStackInput() Variable:tigString->string Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    strcpy(tigString->string, string);
    return tigString;
}
// Returns the raw C string held by a TIGString, or NULL when the value
// is missing, empty, or not a string.  The pointer remains owned by the
// TIGString — callers must not free it.
char *TIGStringOutput(TIGValue *tigString)
{
    // Happy path first: a well-formed string value.
    if (tigString != NULL && tigString->string != NULL && strcmp(tigString->type, "String") == 0)
    {
        return tigString->string;
    }

#ifdef TIG_DEBUG
    if (tigString == NULL)
    {
        printf("ERROR Function:TIGStringOutput() Variable:tigString Equals:NULL\n");
    }
    else
    {
        if (tigString->string == NULL)
        {
            printf("ERROR Function:TIGStringOutput() Variable:tigString->string Equals:NULL\n");
        }

        if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringOutput() Variable:tigString->string Equals:%s Valid:\"String\"\n", tigString->type);
        }
    }
#ifdef TIG_DEBUG_ASSERT
    assert(0);
#endif
#endif
    return NULL;
}
// Returns the character count of a TIGString, or -1 when the value is
// missing, has no string storage, or is not of type "String".
TIGInteger TIGStringLength(TIGValue *tigString)
{
    // Happy path first: a well-formed string value.
    if (tigString != NULL && tigString->string != NULL && strcmp(tigString->type, "String") == 0)
    {
        return (int)strlen(tigString->string);
    }

#ifdef TIG_DEBUG
    if (tigString == NULL)
    {
        printf("ERROR Function:TIGStringLength() Variable:tigString Equals:NULL\n");
    }
    else
    {
        if (tigString->string == NULL)
        {
            printf("ERROR Function:TIGStringLength() Variable:tigString->string Equals:NULL\n");
        }

        if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringLength() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
        }
    }
#ifdef TIG_DEBUG_ASSERT
    assert(0);
#endif
#endif
    return -1;
}
// Returns a new TIGString equal to tigString1 with tigString2 spliced
// in at 'index' (0..length inclusive; length appends).  Neither input
// is modified.  Returns NULL on bad arguments or allocation failure.
TIGValue *TIGStringInsertStringAtIndex(TIGValue *tigString1, TIGValue *tigString2, int index)
{
    if (tigString1 == NULL || tigString2 == NULL || strcmp(tigString1->type, "String") != 0 || strcmp(tigString2->type, "String") != 0
        || index < 0 || index > TIGStringLength(tigString1))
    {
#ifdef TIG_DEBUG
        if (tigString1 == NULL)
        {
            printf("ERROR Function:TIGStringInsertStringAtIndex() Variable:tigString1 Equals:NULL\n");
        }
        else if (strcmp(tigString1->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringInsertStringAtIndex() Variable:tigNumber Equals:%s Valid:\"String\"\n", tigString1->type);
        }

        if (tigString2 == NULL)
        {
            printf("ERROR Function:TIGStringInsertStringAtIndex() Variable:tigString2 Equals:NULL\n");
        }
        else if (strcmp(tigString2->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringInsertStringAtIndex() Variable:tigNumber Equals:%s Valid:\"String\"\n", tigString2->type);
        }

        if (index < 0 || index > TIGStringLength(tigString1))
        {
            printf("ERROR Function:TIGStringInsertStringAtIndex() Variable:index Equals:%d Valid:0 to %d\n", index, TIGStringLength(tigString1));
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    char *newString = (char *)malloc(strlen(tigString1->string) + strlen(tigString2->string) + 1);

    // Fixed: the original neither checked the malloc result nor
    // terminated the buffer, so the strcat calls below appended to
    // uninitialized memory (undefined behavior).
    if (newString == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringInsertStringAtIndex() Variable:newString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }
    newString[0] = '\0';

    if (index == (int)strlen(tigString1->string))
    {
        // Appending: no need to walk character by character.
        strcat(newString, tigString1->string);
        strcat(newString, tigString2->string);
    }
    else
    {
        char character[2];
        int i;

        // Copy tigString1 one character at a time, injecting tigString2
        // just before position 'index'.
        for (i = 0; i < (int)strlen(tigString1->string); i++)
        {
            character[0] = tigString1->string[i];
            character[1] = '\0';

            if (index == i)
            {
                strcat(newString, tigString2->string);
            }

            strcat(newString, character);
        }
    }

    TIGValue *theString = TIGStringInput(NULL, newString);
    free(newString);
    newString = NULL;

    return theString;
}
// Returns a new one-character TIGString holding the character at
// 'index' (0..length-1), or NULL on bad arguments.
TIGValue *TIGStringCharacterAtIndex(TIGValue *tigString, int index)
{
    if (tigString == NULL || index < 0 || index >= TIGStringLength(tigString))
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringCharacterAtIndex() Variable:tigString Equals:NULL\n");
        }

        if (index < 0 || index >= TIGStringLength(tigString))
        {
            printf("ERROR Function:TIGStringCharacterAtIndex() Variable:index Equals:%d Valid:0 to %d\n", index, TIGStringLength(tigString) - 1);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    char *character = TIGStringOutput(tigString);
    // Build the single-character result via the formatting helper.
    return TIGStringWithFormat(NULL, "%c", character[index]);
}
// Removes the character at 'index' (0..length-1) from tigString in
// place.  Invalid arguments leave the string untouched.
void TIGStringRemoveCharacterAtIndex(TIGValue *tigString, int index)
{
    if (tigString == NULL || index < 0 || index >= TIGStringLength(tigString))
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringRemoveCharacterAtIndex() Variable:tigString Equals:NULL\n");
        }

        if (index < 0 || index >= TIGStringLength(tigString))
        {
            printf("ERROR Function:TIGStringRemoveCharacterAtIndex() Variable:index Equals:%d Valid:0 to %d\n", index, TIGStringLength(tigString) - 1);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return;
    }

    int length = TIGStringLength(tigString);
    char *characters = TIGStringOutput(tigString);

    // Since a character is being removed don't add +1 to the malloc length
    char *newCharacters = (char *)malloc(length * sizeof(char));

    // Fixed: the original did not check the allocation.
    if (newCharacters == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringRemoveCharacterAtIndex() Variable:newCharacters Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return;
    }

    int newIndex = 0;
    int i;

    for (i = 0; i < length; i++)
    {
        if (index != i)
        {
            newCharacters[newIndex] = characters[i];
            newIndex++;
        }
    }

    // Fixed: the original never wrote the terminator, so TIGStringInput
    // ran strlen/strcpy over an unterminated buffer (undefined behavior).
    newCharacters[newIndex] = '\0';

    TIGStringInput(tigString, newCharacters);

    free(newCharacters);
    newCharacters = NULL;
}
// Converts a TIGNumber into a new TIGString using the "%f" rendering.
// Returns NULL when the input is missing, not a number, or on OOM.
TIGValue *TIGStringFromNumber(TIGValue *tigNumber)
{
    // Guard clause: only a "Number" value can be converted.
    if (tigNumber == NULL || strcmp(tigNumber->type, "Number") != 0)
    {
#ifdef TIG_DEBUG
        if (tigNumber == NULL)
        {
            printf("ERROR Function:TIGStringFromNumber() Variable:tigNumber Equals:NULL\n");
        }
        else if (strcmp(tigNumber->type, "Number") != 0)
        {
            printf("ERROR Function:TIGStringFromNumber() Variable:tigNumber->type Equals:%s Valid:\"Number\"\n", tigNumber->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    // Measure first, then allocate exactly enough (incl. terminator).
    int neededLength = snprintf(NULL, 0, "%f", tigNumber->number) + 1;
    char *rendered = (char *)malloc(neededLength * sizeof(char));

    if (rendered == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringFromNumber() Variable:stringBuffer Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    snprintf(rendered, neededLength, "%f", tigNumber->number);

    TIGValue *result = TIGStringInput(NULL, rendered);
    free(rendered);
    return result;
}
// Compares two TIGStrings for exact content equality.  Anything other
// than two well-typed strings with identical contents yields TIGNo.
TIGBool TIGStringEqualsString(TIGValue *tigString1, TIGValue *tigString2)
{
    if (tigString1 == NULL || tigString2 == NULL)
    {
        return TIGNo;
    }

    if (strcmp(tigString1->type, "String") != 0 || strcmp(tigString2->type, "String") != 0)
    {
        return TIGNo;
    }

    return strcmp(tigString1->string, tigString2->string) == 0 ? TIGYes : TIGNo;
}
// Returns a new TIGString holding the type name of any TIGValue (e.g.
// "String", "Number", "Object"), or NULL when the value or its type
// pointer is missing.
TIGValue *TIGStringObjectType(TIGValue *tigObject)
{
    if (tigObject == NULL || tigObject->type == NULL)
    {
#ifdef TIG_DEBUG
        if (tigObject == NULL)
        {
            printf("ERROR Function:TIGStringObjectType() Variable:tigObject Equals:NULL\n");
        }
        else if (tigObject->type == NULL)
        {
            printf("ERROR Function:TIGStringObjectType() Variable:tigObject->type Equals:NULL\n");
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    return TIGStringInput(NULL, tigObject->type);
}
// Returns a TIGString with JSON escape sequences added (", \, /, \b,
// \f, \n, \r, \t become two-character sequences).  When nothing needs
// escaping the input value itself is returned unchanged.
TIGValue *TIGStringAddEscapeCharacters(TIGValue *tigString)
{
    if (tigString == NULL || strcmp(tigString->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringAddEscapeCharacters() Variable:tigString Equals:NULL\n");
        }
        else if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringAddEscapeCharacters() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    char *string = tigString->string;
    int extraCount = 0;
    int i;

    // First pass: count how many characters need a backslash prefix.
    for (i = 0; i < (int)strlen(string); i++)
    {
        switch (string[i])
        {
            case '"':
            case '\\':
            case '/':
            case '\b':
            case '\f':
            case '\n':
            case '\r':
            case '\t':
                extraCount++;
                break;
        }
    }

    if (extraCount > 0)
    {
        char *newString = (char *)malloc((strlen(string) + extraCount + 1) * sizeof(char));
        int index = 0;

        if (newString == NULL)
        {
#ifdef TIG_DEBUG
            printf("ERROR Function:TIGStringAddEscapeCharacters() Variable:newString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
            assert(0);
#endif
#endif
            return NULL;
        }

        // Second pass: copy, expanding each special character to "\x".
        for (i = 0; i < (int)strlen(string); i++)
        {
            switch (string[i])
            {
                case '\"':
                    newString[index] = '\\';
                    newString[index + 1] = '"';
                    index += 2;
                    break;
                case '\\':
                    newString[index] = '\\';
                    newString[index + 1] = '\\';
                    index += 2;
                    break;
                case '/':
                    newString[index] = '\\';
                    newString[index + 1] = '/';
                    index += 2;
                    break;
                case '\b':
                    newString[index] = '\\';
                    newString[index + 1] = 'b';
                    index += 2;
                    break;
                case '\f':
                    newString[index] = '\\';
                    newString[index + 1] = 'f';
                    index += 2;
                    break;
                case '\n':
                    newString[index] = '\\';
                    newString[index + 1] = 'n';
                    index += 2;
                    break;
                case '\r':
                    newString[index] = '\\';
                    newString[index + 1] = 'r';
                    index += 2;
                    break;
                case '\t':
                    newString[index] = '\\';
                    newString[index + 1] = 't';
                    index += 2;
                    break;
                default:
                    newString[index] = string[i];
                    index++;
                    break;
            }
        }

        // Fixed: the original never terminated newString, so the
        // strlen/strcpy inside TIGStringInput read past the buffer
        // (the Remove variant already did this correctly).
        newString[index] = '\0';

        TIGValue *theNewTIGString = TIGStringInput(NULL, newString);
        free(newString);
        newString = NULL;

        return theNewTIGString;
    }

    return tigString;
}
// Inverse of TIGStringAddEscapeCharacters: collapses JSON two-character
// escape sequences (\", \\, \/, \b, \f, \n, \r, \t) back into single
// characters.  When the input contains no escapes it is returned as-is.
TIGValue *TIGStringRemoveEscapeCharacters(TIGValue *tigString)
{
    if (tigString == NULL || strcmp(tigString->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringRemoveEscapeCharacters() Variable:tigString Equals:NULL\n");
        }
        else if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringRemoveEscapeCharacters() Variable:tigObject->type Equals:%s Valid:\"String\"\n", tigString->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    char *string = tigString->string;
    int extraCount = 0;
    int i;

    // First pass: count backslash escape pairs so the result buffer can
    // be sized exactly (each pair shrinks to one character).
    for (i = 0; i < strlen(string); i++)
    {
        if (string[i] == '\\')
        {
            switch (string[i + 1])
            {
                case '"':
                case '\\':
                case '/':
                case 'b':
                case 'f':
                case 'n':
                case 'r':
                case 't':
                    extraCount++;

                    // Below makes sure it is not read as something like \\t instead of \\ and \t
                    i++;
                    break;
            }
        }
    }

    //printf("extraCount %d\n", extraCount);

    if (extraCount > 0)
    {
        char *newString = (char *)malloc(((strlen(string) - extraCount) + 1) * sizeof(char));
        int index = 0;

        if (newString == NULL)
        {
#ifdef TIG_DEBUG
            printf("ERROR Function:TIGStringRemoveEscapeCharacters() Variable:newString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
            assert(0);
#endif
#endif
            return NULL;
        }

        // Second pass: copy, collapsing each recognized "\x" pair.
        for (i = 0; i < strlen(string); i++)
        {
            if (string[i] == '\\')
            {
                switch (string[i + 1])
                {
                    case '\"':
                        newString[index] = '"';
                        index++;
                        i++;
                        break;
                    case '\\':
                        newString[index] = '\\';
                        index++;
                        i++;
                        break;
                    case '/':
                        newString[index] = '/';
                        index++;
                        i++;
                        break;
                    case 'b':
                        newString[index] = '\b';
                        index++;
                        i++;
                        break;
                    case 'f':
                        newString[index] = '\f';
                        index++;
                        i++;
                        break;
                    case 'n':
                        newString[index] = '\n';
                        index++;
                        i++;
                        break;
                    case 'r':
                        newString[index] = '\r';
                        index++;
                        i++;
                        break;
                    case 't':
                        newString[index] = '\t';
                        index++;
                        i++;
                        break;
                }
            }
            else
            {
                newString[index] = string[i];
                index++;
            }
        }

        // Terminate before handing the buffer to TIGStringInput.
        newString[index] = '\0';

        TIGValue *theNewTIGString = TIGStringInput(NULL, newString);

        if (newString != NULL)
        {
            free(newString);
            newString = NULL;
        }

        return theNewTIGString;
    }

    return tigString;
}
// Appends printf-style formatted text to tigString, or creates a new
// stacked TIGString when tigString is NULL.  Returns the resulting
// string value, or NULL on allocation failure.
TIGValue *TIGStringWithFormat(TIGValue *tigString, const char *format, ...)
{
    va_list arguments;

    // Find out how long the string is when the arguments are converted to text
    va_start(arguments, format);
    int stringLength = vsnprintf(NULL, 0, format, arguments) + 1;
    va_end(arguments);

    // Create the new buffer with the new string length
    char *stringBuffer = (char *)malloc(stringLength * sizeof(char));

    if (stringBuffer == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringWithFormat() Variable:stringBuffer Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    // Use the new length of text and add the arguments to the new string buffer
    va_start(arguments, format);
    vsnprintf(stringBuffer, stringLength, format, arguments);
    va_end(arguments);

    if (tigString == NULL)
    {
        tigString = TIGStringInput(tigString, stringBuffer);
    }
    else if (tigString->string != NULL && strcmp(tigString->type, "String") == 0)
    {
        // stringLength already includes room for '\0', so no extra +1.
        // Fixed: keep the old pointer until realloc succeeds — the
        // original overwrote tigString->string directly, leaking the
        // string and passing NULL to strcat on allocation failure.
        char *grown = (char *)realloc(tigString->string,
                (strlen(tigString->string) + stringLength) * sizeof(char));

        if (grown == NULL)
        {
#ifdef TIG_DEBUG
            printf("ERROR Function:TIGStringWithFormat() Variable:grown Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
            assert(0);
#endif
#endif
            free(stringBuffer);
            return NULL;
        }

        tigString->string = grown;
        strcat(tigString->string, stringBuffer);
    }
    // (The original had an unreachable else branch here: stringBuffer is
    // always non-NULL at this point, so it has been removed.)

    free(stringBuffer);
    stringBuffer = NULL;

    return tigString;
}
// Returns a new TIGString equal to oldTigString with newTigString
// appended (implemented as an insert at the end).  Inputs are not
// modified; NULL is returned on bad arguments.
TIGValue *TIGStringWithAddedString(TIGValue *oldTigString, TIGValue *newTigString)
{
    if (oldTigString == NULL || newTigString == NULL || oldTigString->string == NULL)
    {
#ifdef TIG_DEBUG
        if (oldTigString == NULL)
        {
            printf("ERROR Function:TIGStringWithAddedString() Variable:oldTigString Equals:NULL\n");
        }
        else if (oldTigString->string == NULL)
        {
            printf("ERROR Function:TIGStringWithAddedString() Variable:oldTigString->string Equals:NULL\n");
        }

        if (newTigString == NULL)
        {
            printf("ERROR Function:TIGStringWithAddedString() Variable:newTigString Equals:NULL\n");
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    // Inserting at index == length appends (see TIGStringInsertStringAtIndex).
    oldTigString = TIGStringInsertStringAtIndex(oldTigString, newTigString, (int)strlen(oldTigString->string));
    return oldTigString;
}
// Serializes a TIGObject to a pretty-printed JSON TIGString (with tabs
// and newlines).  Temporary values are confined to fresh object/array/
// number stack frames around the recursive serializer.
TIGValue *TIGStringFromObject(TIGValue *tigObject)
{
    if (tigObject == NULL || strcmp(tigObject->type, "Object") != 0)
    {
#ifdef TIG_DEBUG
        if (tigObject == NULL)
        {
            printf("ERROR Function:TIGStringFromObject() Variable:tigObject Equals:NULL\n");
        }
        else if (strcmp(tigObject->type, "Object") != 0)
        {
            printf("ERROR Function:TIGStringFromObject() Variable:tigObject->type Equals:%s Valid:\"Object\"\n", tigObject->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    TIGObjectStartStack(NULL);
    TIGArrayStartStack(NULL);
    TIGNumberStartStack(NULL);

    // TIGYes: emit formatting (tabs/newlines) for human readability.
    TIGValue *theString = TIGStringFromObjectWithLevel(NULL, tigObject, 1, TIGYes);

    TIGNumberEndStack(NULL);
    TIGArrayEndStack(NULL);
    TIGObjectEndStack(NULL);

    return theString;
}
// Serializes a TIGObject to a compact JSON TIGString (no tabs or
// newlines), suitable for transmission.
TIGValue *TIGStringFromObjectForNetwork(TIGValue *tigObject)
{
    if (tigObject == NULL || strcmp(tigObject->type, "Object") != 0)
    {
#ifdef TIG_DEBUG
        if (tigObject == NULL)
        {
            printf("ERROR Function:TIGStringFromObjectForNetwork() Variable:tigObject Equals:NULL\n");
        }
        else if (strcmp(tigObject->type, "Object") != 0)
        {
            printf("ERROR Function:TIGStringFromObjectForNetwork() Variable:tigObject->type Equals:%s Valid:\"Object\"\n", tigObject->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    TIGObjectStartStack(NULL);
    TIGArrayStartStack(NULL);
    TIGNumberStartStack(NULL);

    // TIGNo: suppress formatting for a compact wire representation.
    TIGValue *theString = TIGStringFromObjectWithLevel(NULL, tigObject, 1, TIGNo);

    TIGNumberEndStack(NULL);
    TIGArrayEndStack(NULL);
    TIGObjectEndStack(NULL);

    return theString;
}
// Serializes a TIGArray to a pretty-printed JSON TIGString (with tabs
// and newlines).
TIGValue *TIGStringFromArray(TIGValue *tigArray)
{
    if (tigArray == NULL || strcmp(tigArray->type, "Array") != 0)
    {
#ifdef TIG_DEBUG
        if (tigArray == NULL)
        {
            printf("ERROR Function:TIGStringFromArray() Variable:tigArray Equals:NULL\n");
        }
        else if (strcmp(tigArray->type, "Array") != 0)
        {
            printf("ERROR Function:TIGStringFromArray() Variable:tigArray->type Equals:%s Valid:\"Array\"\n", tigArray->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    TIGObjectStartStack(NULL);
    TIGArrayStartStack(NULL);
    TIGNumberStartStack(NULL);

    // TIGYes: emit formatting (tabs/newlines) for human readability.
    TIGValue *theString = TIGStringFromObjectWithLevel(NULL, tigArray, 1, TIGYes);

    TIGNumberEndStack(NULL);
    TIGArrayEndStack(NULL);
    TIGObjectEndStack(NULL);

    return theString;
}
// Serializes a TIGArray to a compact JSON TIGString (no tabs or
// newlines), suitable for transmission.
TIGValue *TIGStringFromArrayForNetwork(TIGValue *tigArray)
{
    if (tigArray == NULL || strcmp(tigArray->type, "Array") != 0)
    {
#ifdef TIG_DEBUG
        if (tigArray == NULL)
        {
            printf("ERROR Function:TIGStringFromArrayForNetwork() Variable:tigArray Equals:NULL\n");
        }
        else if (strcmp(tigArray->type, "Array") != 0)
        {
            printf("ERROR Function:TIGStringFromArrayForNetwork() Variable:tigArray->type Equals:%s Valid:\"Array\"\n", tigArray->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }

    TIGObjectStartStack(NULL);
    TIGArrayStartStack(NULL);
    TIGNumberStartStack(NULL);

    // TIGNo: suppress formatting for a compact wire representation.
    TIGValue *theString = TIGStringFromObjectWithLevel(NULL, tigArray, 1, TIGNo);

    TIGNumberEndStack(NULL);
    TIGArrayEndStack(NULL);
    TIGObjectEndStack(NULL);

    return theString;
}
// The JSON string outputs a TIGString but the functions below have their own stack
TIGValue *TIGStringFromObjectWithLevel(TIGValue *tigString, TIGValue *tigValue, int level, TIGBool useEscapeCharacters)
{
    /*
     * Recursively appends the JSON text for tigValue onto tigString and
     * returns the (possibly re-allocated) string value.
     *
     * tigString          - accumulator string; NULL starts a new one.
     * tigValue           - value to serialize: "Array", "Object", "Number"
     *                      or "String" (dispatched on tigValue->type).
     * level              - nesting depth, used to emit one tab per level.
     * useEscapeCharacters - TIGYes for pretty output (tabs/newlines),
     *                      TIGNo for compact one-line output.
     *
     * Callers (TIGStringFromObject() and friends) are expected to have
     * opened the Object/Array/Number stacks before calling.
     */
    int i;
    /* Bug fix: unlike its sibling entry points, this function never
       validated tigValue before dereferencing tigValue->type.  The
       recursion can reach here with NULL (e.g. a NULL array slot), which
       previously crashed.  Emit nothing for a NULL value instead. */
    if (tigValue == NULL)
    {
        return tigString;
    }
    if (strcmp(tigValue->type, "Array") == 0)
    {
        TIGValue *theTIGStringTabs = NULL;
        TIGValue *theTIGStringEndTab = NULL;
        if (useEscapeCharacters)
        {
            /* One tab per nesting level for the elements; one fewer for the
               closing bracket so it lines up with the opening one. */
            for (i = 0; i < level; i++)
            {
                theTIGStringTabs = TIGStringWithFormat(theTIGStringTabs, "\t");
                if (i < level - 1)
                {
                    theTIGStringEndTab = TIGStringWithFormat(theTIGStringEndTab, "\t");
                }
            }
            tigString = TIGStringWithFormat(tigString, "[\n");
        }
        else
        {
            tigString = TIGStringWithFormat(tigString, "[");
        }
        /* Hoisted out of the loop: the count was previously re-queried on
           every iteration (twice per pass, including the separator test). */
        int itemCount = TIGArrayCount(tigValue);
        for (i = 0; i < itemCount; i++)
        {
            TIGValue *theTIGValue = TIGArrayValueAtIndex(tigValue, i);
            if (useEscapeCharacters)
            {
                tigString = TIGStringWithAddedString(tigString, theTIGStringTabs);
            }
            tigString = TIGStringFromObjectWithLevel(tigString, theTIGValue, level + 1, useEscapeCharacters);
            if (useEscapeCharacters)
            {
                if (i < itemCount - 1)
                {
                    tigString = TIGStringWithFormat(tigString, ",\n");
                }
                else
                {
                    tigString = TIGStringWithFormat(tigString, "\n");
                }
            }
            else
            {
                if (i < itemCount - 1)
                {
                    tigString = TIGStringWithFormat(tigString, ",");
                }
            }
        }
        if (level > 1 && useEscapeCharacters)
        {
            tigString = TIGStringWithAddedString(tigString, theTIGStringEndTab);
        }
        tigString = TIGStringWithFormat(tigString, "]");
    }
    else if (strcmp(tigValue->type, "Number") == 0)
    {
        if (tigValue->string != NULL)
        {
            /* Booleans are stored as Numbers with a literal string payload.
               NOTE(review): any other non-NULL payload is silently dropped
               here - presumably that never occurs for valid Numbers; verify. */
            if (strcmp(tigValue->string, "false") == 0 || strcmp(tigValue->string, "true") == 0)
            {
                tigString = TIGStringWithAddedString(tigString, TIGStringInput(NULL, tigValue->string));
            }
        }
        else
        {
            tigString = TIGStringWithAddedString(tigString, TIGStringFromNumber(tigValue));
        }
    }
    else if (strcmp(tigValue->type, "Object") == 0)
    {
        /* Objects are serialized as parallel key/value arrays. */
        TIGValue *theTIGArrayStrings = TIGArrayOfObjectStrings(tigValue);
        TIGValue *theTIGArrayValues = TIGArrayOfObjectValues(tigValue);
        TIGValue *theTIGStringTabs = NULL;
        TIGValue *theTIGStringEndTab = NULL;
        if (useEscapeCharacters)
        {
            for (i = 0; i < level; i++)
            {
                theTIGStringTabs = TIGStringWithFormat(theTIGStringTabs, "\t");
                if (i < level - 1)
                {
                    theTIGStringEndTab = TIGStringWithFormat(theTIGStringEndTab, "\t");
                }
            }
            tigString = TIGStringWithFormat(tigString, "{\n");
        }
        else
        {
            tigString = TIGStringWithFormat(tigString, "{");
        }
        /* Hoisted for the same reason as the Array branch above. */
        int pairCount = TIGArrayCount(theTIGArrayStrings);
        for (i = 0; i < pairCount; i++)
        {
            TIGValue *theTIGString = TIGArrayValueAtIndex(theTIGArrayStrings, i);
            TIGValue *theTIGValue = TIGArrayValueAtIndex(theTIGArrayValues, i);
            if (useEscapeCharacters)
            {
                tigString = TIGStringWithAddedString(tigString, theTIGStringTabs);
                tigString = TIGStringWithFormat(tigString, "\"%s\": ", TIGStringOutput(TIGStringAddEscapeCharacters(theTIGString)));
            }
            else
            {
                tigString = TIGStringWithFormat(tigString, "\"%s\":", TIGStringOutput(TIGStringAddEscapeCharacters(theTIGString)));
            }
            tigString = TIGStringFromObjectWithLevel(tigString, theTIGValue, level + 1, useEscapeCharacters);
            if (useEscapeCharacters)
            {
                if (i < pairCount - 1)
                {
                    tigString = TIGStringWithFormat(tigString, ",\n");
                }
                else
                {
                    tigString = TIGStringWithFormat(tigString, "\n");
                }
            }
            else
            {
                if (i < pairCount - 1)
                {
                    tigString = TIGStringWithFormat(tigString, ",");
                }
            }
        }
        if (level > 1 && useEscapeCharacters)
        {
            tigString = TIGStringWithAddedString(tigString, theTIGStringEndTab);
        }
        tigString = TIGStringWithFormat(tigString, "}");
    }
    else if (strcmp(tigValue->type, "String") == 0)
    {
        tigString = TIGStringWithFormat(tigString, "\"%s\"", TIGStringOutput(TIGStringAddEscapeCharacters(tigValue)));
    }
    return tigString;
}
void TIGStringWriteWithFilename(TIGValue *tigString, TIGValue *filenameString)
{
    /*
     * Writes the contents of tigString (a "String" value) to the file named
     * by filenameString, truncating any existing file.  Invalid arguments
     * or a failed fopen() are reported in debug builds and otherwise
     * ignored; the function has no return value.
     */
    if (tigString == NULL || filenameString == NULL || strcmp(tigString->type, "String") != 0 || strcmp(filenameString->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringWriteWithFilename() Variable:tigString Equals:NULL\n");
        }
        else if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringWriteWithFilename() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
        }
        if (filenameString == NULL)
        {
            printf("ERROR Function:TIGStringWriteWithFilename() Variable:filenameString Equals:NULL\n");
        }
        else if (strcmp(filenameString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringWriteWithFilename() Variable:filenameString->type Equals:%s Valid:\"String\"\n", filenameString->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
    }
    else
    {
        FILE *theFile = fopen(filenameString->string, "w");
        if (theFile != NULL)
        {
            fprintf(theFile, "%s", tigString->string);
            /* Bug fix: fclose() was previously called unconditionally, so a
               failed fopen() led to fclose(NULL), which is undefined
               behavior.  Only close the stream we actually opened. */
            fclose(theFile);
        }
        else
        {
#ifdef TIG_DEBUG
            printf("ERROR Function:TIGStringWriteWithFilename() Variable:theFile Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
            assert(0);
#endif
#endif
        }
    }
}
TIGValue *TIGStringReadFromFilename(TIGValue *filenameString)
{
    /*
     * Reads the entire file named by filenameString and returns its
     * contents as a new "String" value, or NULL on any failure.
     *
     * Rewritten to fix several defects in the original implementation:
     *  - fgetc() returns an int; it was stored in a char and compared to
     *    EOF, which breaks on platforms where char is unsigned and
     *    misidentifies 0xFF bytes as end-of-file.
     *  - strcat() was applied to memory freshly realloc'd from NULL, whose
     *    contents are indeterminate (undefined behavior).
     *  - The realloc size lacked room for the terminator on intermediate
     *    blocks (heap overflow by one byte).
     *  - fclose(NULL) was reached when fopen() failed.
     *  - Debug messages named TIGStringWriteWithFilename() and were not
     *    guarded by TIG_DEBUG, unlike every other function in this file.
     */
    if (filenameString == NULL || filenameString->string == NULL || strcmp(filenameString->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        if (filenameString == NULL)
        {
            printf("ERROR Function:TIGStringReadFromFilename() Variable:filenameString Equals:NULL\n");
        }
        else
        {
            if (strcmp(filenameString->type, "String") != 0)
            {
                printf("ERROR Function:TIGStringReadFromFilename() Variable:filenameString->type Equals:%s Valid:\"String\"\n", filenameString->type);
            }
            if (filenameString->string == NULL)
            {
                printf("ERROR Function:TIGStringReadFromFilename() Variable:filenameString->string Equals:NULL\n");
            }
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }
    FILE *theFile = fopen(filenameString->string, "r");
    if (theFile == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringReadFromFilename() Variable:theFile Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return NULL;
    }
    /* Accumulate the file into a geometrically growing buffer. */
    size_t capacity = 128;
    size_t length = 0;
    char *contents = malloc(capacity);
    if (contents == NULL)
    {
#ifdef TIG_DEBUG
        printf("ERROR Function:TIGStringReadFromFilename() Variable:buffer Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        fclose(theFile);
        return NULL;
    }
    int c;
    while ((c = fgetc(theFile)) != EOF)
    {
        if (length + 1 >= capacity)
        {
            size_t newCapacity = capacity * 2;
            char *resized = realloc(contents, newCapacity);
            if (resized == NULL)
            {
#ifdef TIG_DEBUG
                printf("ERROR Function:TIGStringReadFromFilename() Variable:newString Equals:NULL\n");
#ifdef TIG_DEBUG_ASSERT
                assert(0);
#endif
#endif
                free(contents);
                fclose(theFile);
                return NULL;
            }
            contents = resized;
            capacity = newCapacity;
        }
        contents[length++] = (char)c;
    }
    contents[length] = '\0';
    fclose(theFile);
    TIGValue *theString = TIGStringInput(NULL, contents);
    free(contents);
    return theString;
}
TIGBool TIGStringPrefix(TIGValue *tigString, TIGValue *tigStringPrefix)
{
    /*
     * Returns TIGYes when tigString begins with tigStringPrefix, TIGNo
     * otherwise (including when either string is empty or the arguments
     * are invalid).
     *
     * Improvement: the original re-evaluated strlen() in the loop condition
     * on every iteration (quadratic in the string length) and compared a
     * signed index against the size_t result.  The lengths are now computed
     * once and the comparison delegated to strncmp().
     */
    if (tigString == NULL || strcmp(tigString->type, "String") != 0 || tigStringPrefix == NULL || strcmp(tigStringPrefix->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringPrefix() Variable:tigString Equals:NULL\n");
        }
        else if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringPrefix() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
        }
        if (tigStringPrefix == NULL)
        {
            printf("ERROR Function:TIGStringPrefix() Variable:tigStringPrefix Equals:NULL\n");
        }
        else if (strcmp(tigStringPrefix->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringPrefix() Variable:tigStringPrefix->type Equals:%s Valid:\"String\"\n", tigStringPrefix->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return TIGNo;
    }
    size_t stringLength = strlen(tigString->string);
    size_t prefixLength = strlen(tigStringPrefix->string);
    /* Empty strings never match, and a prefix longer than the string
       cannot match (the original loop reached the same answer, one
       character at a time). */
    if (stringLength == 0 || prefixLength == 0 || prefixLength > stringLength)
    {
        return TIGNo;
    }
    return strncmp(tigString->string, tigStringPrefix->string, prefixLength) == 0 ? TIGYes : TIGNo;
}
TIGBool TIGStringSuffix(TIGValue *tigString, TIGValue *tigStringSuffix)
{
    /*
     * Returns TIGYes when tigString ends with tigStringSuffix, TIGNo
     * otherwise (including when either string is empty or the arguments
     * are invalid).
     *
     * Bug fix: when the suffix was longer than the string but its tail
     * kept matching, the original walked the index past the start of
     * tigString->string and read out of bounds (negative array index).
     * The length check below makes that impossible, and the remaining
     * comparison reduces to a single strcmp() against the string's tail.
     */
    if (tigString == NULL || strcmp(tigString->type, "String") != 0 || tigStringSuffix == NULL || strcmp(tigStringSuffix->type, "String") != 0)
    {
#ifdef TIG_DEBUG
        if (tigString == NULL)
        {
            printf("ERROR Function:TIGStringSuffix() Variable:tigString Equals:NULL\n");
        }
        else if (strcmp(tigString->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringSuffix() Variable:tigString->type Equals:%s Valid:\"String\"\n", tigString->type);
        }
        if (tigStringSuffix == NULL)
        {
            printf("ERROR Function:TIGStringSuffix() Variable:tigStringSuffix Equals:NULL\n");
        }
        else if (strcmp(tigStringSuffix->type, "String") != 0)
        {
            printf("ERROR Function:TIGStringSuffix() Variable:tigStringSuffix->type Equals:%s Valid:\"String\"\n", tigStringSuffix->type);
        }
#ifdef TIG_DEBUG_ASSERT
        assert(0);
#endif
#endif
        return TIGNo;
    }
    size_t stringLength = strlen(tigString->string);
    size_t suffixLength = strlen(tigStringSuffix->string);
    if (stringLength == 0 || suffixLength == 0 || suffixLength > stringLength)
    {
        return TIGNo;
    }
    return strcmp(tigString->string + (stringLength - suffixLength), tigStringSuffix->string) == 0 ? TIGYes : TIGNo;
}
| TigerFusion/TigerEngine | TIGString.c | C | mit | 36,020 |
// rd_route.c
// Copyright (c) 2014-2015 Dmitry Rodionov
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.
#include <stdlib.h> // realloc()
#include <libgen.h> // basename()
#include <assert.h> // assert()
#include <stdio.h> // fprintf()
#include <dlfcn.h> // dladdr()
#include "TargetConditionals.h"
#if defined(__i386__) || defined(__x86_64__)
#if !(TARGET_IPHONE_SIMULATOR)
#include <mach/mach_vm.h> // mach_vm_*
#else
#include <mach/vm_map.h> // vm_*
#define mach_vm_address_t vm_address_t
#define mach_vm_size_t vm_size_t
#define mach_vm_allocate vm_allocate
#define mach_vm_deallocate vm_deallocate
#define mach_vm_write vm_write
#define mach_vm_remap vm_remap
#define mach_vm_protect vm_protect
#define NSLookupSymbolInImage(...) ((void)0)
#define NSAddressOfSymbol(...) ((void)0)
#endif
#else
#endif
#include <mach-o/dyld.h> // _dyld_*
#include <mach-o/nlist.h> // nlist/nlist_64
#include <mach/mach_init.h> // mach_task_self()
#include "rd_route.h"
#define RDErrorLog(format, ...) fprintf(stderr, "%s:%d:\n\terror: "format"\n", \
__FILE__, __LINE__, ##__VA_ARGS__)
#if defined(__x86_64__)
typedef struct mach_header_64 mach_header_t;
typedef struct segment_command_64 segment_command_t;
#define LC_SEGMENT_ARCH_INDEPENDENT LC_SEGMENT_64
typedef struct nlist_64 nlist_t;
#else
typedef struct mach_header mach_header_t;
typedef struct segment_command segment_command_t;
#define LC_SEGMENT_ARCH_INDEPENDENT LC_SEGMENT
typedef struct nlist nlist_t;
#endif
/* NOTE(review): not referenced within this part of the file; the field
   names suggest it pairs the address of an injected (remapped) mach header
   with the address of the patched target function - confirm against the
   rest of rd_route.c. */
typedef struct rd_injection {
    mach_vm_address_t injected_mach_header;
    mach_vm_address_t target_address;
} rd_injection_t;
static void* _function_ptr_within_image(const char *function_name, void *macho_image_header, uintptr_t vm_image_slide);
void* function_ptr_from_name(const char *function_name)
{
    /* Resolve function_name to an address by probing every mach-o image
       currently loaded into this process.  Returns NULL (after logging)
       when no image contains the symbol. */
    assert(function_name);
    uint32_t image_index = 0;
    /* _dyld_image_count() is deliberately re-queried each pass, matching
       the original iteration behavior. */
    while (image_index < _dyld_image_count()) {
        void *image_header = (void *)_dyld_get_image_header(image_index);
        uintptr_t slide = _dyld_get_image_vmaddr_slide(image_index);
        void *address = _function_ptr_within_image(function_name, image_header, slide);
        if (address) {
            return address;
        }
        image_index++;
    }
    RDErrorLog("Failed to find symbol `%s` in the current address space.", function_name);
    return NULL;
}
/* Looks up function_name inside a single mach-o image via the deprecated
 * NSLookupSymbolInImage/NSAddressOfSymbol API.  Returns the symbol's
 * address, or NULL when the image does not export it.
 *
 * NOTE(review): vmaddr_slide is currently unused here - presumably kept for
 * a manual symbol-table walk fallback; the function returns NULL when the
 * NSLookup path fails.  On the iphone simulator the NS* calls are macro'd
 * away to ((void)0) at the top of this file, which is why the call is kept
 * in this exact statement-expression form. */
static void* _function_ptr_within_image(const char *function_name, void *macho_image_header, uintptr_t vmaddr_slide)
{
    assert(function_name);
    assert(macho_image_header);
    /**
     * Try the system NSLookup API to find out the function's pointer withing the specifed header.
     */
    /* The API is deprecated since macOS 10.5; silence the warning locally. */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated"
    void *pointer_via_NSLookup = ({
        NSSymbol symbol = NSLookupSymbolInImage(macho_image_header, function_name,
            NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR);
        NSAddressOfSymbol(symbol);
    });
#pragma clang diagnostic pop
    if (pointer_via_NSLookup) return pointer_via_NSLookup;
    return NULL;
}
| XVimProject/XVim2 | XVim2/Helper/rd_route.c | C | mit | 3,016 |
/*
* COPYRIGHT: Stealthy Labs LLC
* DATE: 29th May 2015
* AUTHOR: Stealthy Labs
* SOFTWARE: Tea Time
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <teatime.h>
static int teatime_check_gl_version(uint32_t *major, uint32_t *minor);
static int teatime_check_program_errors(GLuint program);
static int teatime_check_shader_errors(GLuint shader);
#define TEATIME_BREAKONERROR(FN,RC) if ((RC = teatime_check_gl_errors(__LINE__, #FN )) < 0) break
#define TEATIME_BREAKONERROR_FB(FN,RC) if ((RC = teatime_check_gl_fb_errors(__LINE__, #FN )) < 0) break
/* Allocates and initializes a teatime context: verifies the GL version is
 * at least 3.0, creates and binds an offscreen framebuffer object, and
 * records the GPU's maximum texture size.  Returns NULL on any failure.
 * Ownership: the caller must release the returned context with
 * teatime_cleanup().
 *
 * The do { ... } while (0) wrapper exists so the TEATIME_BREAKONERROR
 * macros can bail out of the sequence with `break` after any GL error. */
teatime_t *teatime_setup()
{
    int rc = 0;
    teatime_t *obj = calloc(1, sizeof(teatime_t));
    if (!obj) {
        fprintf(stderr, "Out of memory allocating %zu bytes\n",
                sizeof(teatime_t));
        return NULL;
    }
    do {
        uint32_t version[2] = { 0, 0};
        if (teatime_check_gl_version(&version[0], &version[1]) < 0) {
            fprintf(stderr, "Unable to verify OpenGL version\n");
            rc = -1;
            break;
        }
        /* GL 3.0+ is required (integer textures, EXT_framebuffer_object). */
        if (version[0] < 3) {
            fprintf(stderr, "Minimum Required OpenGL version 3.0. You have %u.%u\n",
                    version[0], version[1]);
            rc = -1;
            break;
        }
        /* initialize off-screen framebuffer */
        /*
         * This is the EXT_framebuffer_object OpenGL extension that allows us to
         * use an offscreen buffer as a target for rendering operations such as
         * vector calculations, providing full precision and removing unwanted
         * clamping issues.
         * we are turning off the traditional framebuffer here apparently.
         */
        glGenFramebuffersEXT(1, &(obj->ofb));
        glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, obj->ofb);
        TEATIME_BREAKONERROR(glBindFramebufferEXT, rc);
        fprintf(stderr, "Successfully created off-screen framebuffer with id: %d\n",
                obj->ofb);
        /* get the texture size */
        obj->maxtexsz = -1;
        glGetIntegerv(GL_MAX_TEXTURE_SIZE, &(obj->maxtexsz));
        fprintf(stderr, "Maximum Texture size for the GPU: %d\n", obj->maxtexsz);
        /* Texture/shader handles start out unset; created later by
           teatime_create_textures() / teatime_create_program(). */
        obj->itexid = obj->otexid = 0;
        obj->shader = obj->program = 0;
    } while (0);
    if (rc < 0) {
        teatime_cleanup(obj);
        obj = NULL;
    }
    return obj;
}
void teatime_cleanup(teatime_t *obj)
{
    /* Releases every GL resource owned by the context (shader program,
       textures, offscreen framebuffer) and frees the context itself.
       Safe to call with NULL. */
    if (obj == NULL)
        return;
    teatime_delete_program(obj);
    teatime_delete_textures(obj);
    /* Unbind before deleting the framebuffer, then flush the pipeline. */
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
    glDeleteFramebuffersEXT(1, &obj->ofb);
    glFlush();
    free(obj);
}
int teatime_set_viewport(teatime_t *obj, uint32_t ilen)
{
    /* Configures an orthographic 1:1 pixel = texel = data mapping for a
       square texture derived from the input byte length (4 uint32 texels
       per pixel).  Returns 0 on success, -EINVAL otherwise. */
    uint32_t side = (uint32_t)((long)(sqrt(ilen / 4.0)));
    if (obj == NULL)
        return -EINVAL;
    if (side == 0 || side >= (GLuint)obj->maxtexsz) {
        fprintf(stderr, "Max. texture size is %d. Calculated: %u from input length: %u\n",
                obj->maxtexsz, side, ilen);
        return -EINVAL;
    }
    /* viewport mapping 1:1 pixel = texel = data mapping */
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0.0, side, 0.0, side);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glViewport(0, 0, side, side);
    obj->tex_size = side;
    fprintf(stderr, "Texture size: %u x %u\n", side, side);
    return 0;
}
/* Creates the input and output textures for one encryption pass.
 *
 * obj   - context from teatime_setup(); teatime_set_viewport() must have
 *         been called first so obj->tex_size matches ilen.
 * input - ilen bytes of data, uploaded into the input texture as
 *         unsigned 32-bit RGBA texels (4 uint32 per pixel).
 * ilen  - input length in bytes; sqrt(ilen/4) must equal obj->tex_size.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -1 on a GL error
 * (via the TEATIME_BREAKONERROR macros, which `break` out of the
 * do { ... } while (0) sequence). */
int teatime_create_textures(teatime_t *obj, const uint32_t *input, uint32_t ilen)
{
    if (obj && input && ilen > 0) {
        int rc = 0;
        do {
            uint32_t texsz = (uint32_t)((long)(sqrt(ilen / 4.0)));
            if (texsz != obj->tex_size) {
                fprintf(stderr, "Viewport texture size(%u) != Input texture size (%u)\n",
                        obj->tex_size, texsz);
                rc = -EINVAL;
                break;
            }
            glGenTextures(1, &(obj->itexid));
            glGenTextures(1, &(obj->otexid));
            fprintf(stderr, "Created input texture with ID: %u\n", obj->itexid);
            fprintf(stderr, "Created output texture with ID: %u\n", obj->otexid);
            /** BIND ONE TEXTURE AT A TIME **/
            /* the texture target can vary depending on GPU */
            glBindTexture(GL_TEXTURE_2D, obj->itexid);
            TEATIME_BREAKONERROR(glBindTexture, rc);
            /* byte-aligned rows: the data is tightly packed */
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            /* turn off filtering and set proper wrap mode - this is obligatory for
             * floating point textures */
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            /* create a 2D texture of the same size as the data
             * internal format: GL_RGBA32UI_EXT
             * texture format: GL_RGBA_INTEGER
             * texture type: GL_UNSIGNED_INT
             */
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32UI_EXT,
                    texsz, texsz, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT, NULL);
            TEATIME_BREAKONERROR(glTexImage2D, rc);
            /* transfer data to texture */
#ifdef WIN32
            glTexSubImage2D
#else
            glTexSubImage2DEXT
#endif
                (GL_TEXTURE_2D, 0, 0, 0, obj->tex_size,
                 obj->tex_size, GL_RGBA_INTEGER, GL_UNSIGNED_INT, input);
#ifdef WIN32
            TEATIME_BREAKONERROR(glTexSubImage2D, rc);
#else
            TEATIME_BREAKONERROR(glTexSubImage2DEXT, rc);
#endif
            fprintf(stderr, "Successfully transferred input data to texture ID: %u\n", obj->itexid);
            /* BIND the OUTPUT texture and work on it */
            /* the texture target can vary depending on GPU */
            glBindTexture(GL_TEXTURE_2D, obj->otexid);
            TEATIME_BREAKONERROR(glBindTexture, rc);
            /* turn off filtering and set proper wrap mode - this is obligatory for
             * floating point textures */
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
            TEATIME_BREAKONERROR(glTexParameteri, rc);
            /* create a 2D texture of the same size as the data
             * internal format: GL_RGBA32UI_EXT
             * texture format: GL_RGBA_INTEGER
             * texture type: GL_UNSIGNED_INT
             */
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32UI_EXT,
                    texsz, texsz, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT, NULL);
            TEATIME_BREAKONERROR(glTexImage2D, rc);
            /* change tex-env to replace instead of the default modulate */
            glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
            TEATIME_BREAKONERROR(glTexEnvi, rc);
            /* attach texture */
            glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
                    GL_TEXTURE_2D, obj->otexid, 0);
            TEATIME_BREAKONERROR(glFramebufferTexture2DEXT, rc);
            TEATIME_BREAKONERROR_FB(glFramebufferTexture2DEXT, rc);
            glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
            TEATIME_BREAKONERROR(glDrawBuffer, rc);
            glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT1_EXT,
                    GL_TEXTURE_2D, obj->otexid, 0);
            TEATIME_BREAKONERROR(glFramebufferTexture2DEXT, rc);
            TEATIME_BREAKONERROR_FB(glFramebufferTexture2DEXT, rc);
            rc = 0;
        } while (0);
        return rc;
    }
    return -EINVAL;
}
/* Reads the rendered result back from the output texture (color
 * attachment 0) into `output`, which must hold olen bytes where
 * sqrt(olen/4) equals obj->tex_size.  Returns 0 on success, -EINVAL on
 * bad arguments, -1 on a GL error. */
int teatime_read_textures(teatime_t *obj, uint32_t *output, uint32_t olen)
{
    if (obj && output && olen > 0 && obj->otexid > 0) {
        int rc = 0;
        do {
            uint32_t texsz = (uint32_t)((long)(sqrt(olen / 4.0)));
            if (texsz != obj->tex_size) {
                fprintf(stderr, "Viewport texture size(%u) != Input texture size (%u)\n",
                        obj->tex_size, texsz);
                rc = -EINVAL;
                break;
            }
            /* read the texture back */
            glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
            TEATIME_BREAKONERROR(glReadBuffer, rc);
            glReadPixels(0, 0, obj->tex_size, obj->tex_size, GL_RGBA_INTEGER, GL_UNSIGNED_INT, output);
            TEATIME_BREAKONERROR(glReadPixels, rc);
            fprintf(stderr, "Successfully read data from the texture\n");
        } while (0);
        return rc;
    }
    return -EINVAL;
}
/* Compiles `source` as a GLSL fragment shader, links it into a program,
 * and caches the uniform locations used by teatime_run_program()
 * (idata, odata, ikey, rounds - see the shader sources at the bottom of
 * this file).  Returns 0 on success, -EINVAL on bad arguments, -1 on a
 * GL/compile/link error (errors are logged to stderr). */
int teatime_create_program(teatime_t *obj, const char *source)
{
    if (obj && source) {
        int rc = 0;
        do {
            obj->program = glCreateProgram();
            TEATIME_BREAKONERROR(glCreateProgram, rc);
            obj->shader = glCreateShader(GL_FRAGMENT_SHADER_ARB);
            TEATIME_BREAKONERROR(glCreateShader, rc);
            glShaderSource(obj->shader, 1, &source, NULL);
            TEATIME_BREAKONERROR(glShaderSource, rc);
            glCompileShader(obj->shader);
            /* dump the info log (if any) before checking glGetError */
            rc = teatime_check_shader_errors(obj->shader);
            if (rc < 0) break;
            TEATIME_BREAKONERROR(glCompileShader, rc);
            glAttachShader(obj->program, obj->shader);
            TEATIME_BREAKONERROR(glAttachShader, rc);
            glLinkProgram(obj->program);
            rc = teatime_check_program_errors(obj->program);
            if (rc < 0) break;
            TEATIME_BREAKONERROR(glLinkProgram, rc);
            /* cache uniform locations for the render pass */
            obj->locn_input = glGetUniformLocation(obj->program, "idata");
            TEATIME_BREAKONERROR(glGetUniformLocation, rc);
            obj->locn_output = glGetUniformLocation(obj->program, "odata");
            TEATIME_BREAKONERROR(glGetUniformLocation, rc);
            obj->locn_key = glGetUniformLocation(obj->program, "ikey");
            TEATIME_BREAKONERROR(glGetUniformLocation, rc);
            obj->locn_rounds = glGetUniformLocation(obj->program, "rounds");
            TEATIME_BREAKONERROR(glGetUniformLocation, rc);
            rc = 0;
        } while (0);
        return rc;
    }
    return -EINVAL;
}
/* Executes one GPGPU pass: binds the input/output textures to texture
 * units 0/1, uploads the 128-bit TEA key and round count as uniforms, and
 * renders a full-viewport quad so the fragment shader processes every
 * texel.  Requires teatime_create_program() and teatime_create_textures()
 * to have succeeded.  Returns 0 on success, -EINVAL on bad arguments, -1
 * on a GL error. */
int teatime_run_program(teatime_t *obj, const uint32_t ikey[4], uint32_t rounds)
{
    if (obj && obj->program > 0) {
        int rc = 0;
        do {
            glUseProgram(obj->program);
            TEATIME_BREAKONERROR(glUseProgram, rc);
            /* input texture -> unit 0 -> "idata" sampler */
            glActiveTexture(GL_TEXTURE0);
            TEATIME_BREAKONERROR(glActiveTexture, rc);
            glBindTexture(GL_TEXTURE_2D, obj->itexid);
            TEATIME_BREAKONERROR(glBindTexture, rc);
            glUniform1i(obj->locn_input, 0);
            TEATIME_BREAKONERROR(glUniform1i, rc);
            /* output texture -> unit 1 -> "odata" */
            glActiveTexture(GL_TEXTURE1);
            TEATIME_BREAKONERROR(glActiveTexture, rc);
            glBindTexture(GL_TEXTURE_2D, obj->otexid);
            TEATIME_BREAKONERROR(glBindTexture, rc);
            glUniform1i(obj->locn_output, 1);
            TEATIME_BREAKONERROR(glUniform1i, rc);
            glUniform4uiv(obj->locn_key, 1, ikey);
            /* Fix: the error-check macro stringifies its first argument for
               the log message; it previously said glUniform1uiv although
               the call above is glUniform4uiv. */
            TEATIME_BREAKONERROR(glUniform4uiv, rc);
            glUniform1ui(obj->locn_rounds, rounds);
            TEATIME_BREAKONERROR(glUniform1ui, rc);
            glFinish();
            glPolygonMode(GL_FRONT, GL_FILL);
            /* render: one quad covering the whole viewport, with unit
               texture coordinates so every data texel is visited */
            glBegin(GL_QUADS);
            glTexCoord2i(0, 0);
            glVertex2i(0, 0);
            //glTexCoord2i(obj->tex_size, 0);
            glTexCoord2i(1, 0);
            glVertex2i(obj->tex_size, 0);
            //glTexCoord2i(obj->tex_size, obj->tex_size);
            glTexCoord2i(1, 1);
            glVertex2i(obj->tex_size, obj->tex_size);
            glTexCoord2i(0, 1);
            //glTexCoord2i(0, obj->tex_size);
            glVertex2i(0, obj->tex_size);
            glEnd();
            glFinish();
            TEATIME_BREAKONERROR_FB(Rendering, rc);
            TEATIME_BREAKONERROR(Rendering, rc);
            rc = 0;
        } while (0);
        return rc;
    }
    return -EINVAL;
}
void teatime_delete_textures(teatime_t *obj)
{
    /* Frees the input/output textures (if created) and clears their
       handles so a later call is a no-op.  NULL-safe. */
    if (obj == NULL)
        return;
    if (obj->itexid) {
        glDeleteTextures(1, &obj->itexid);
        obj->itexid = 0;
    }
    if (obj->otexid) {
        glDeleteTextures(1, &obj->otexid);
        obj->otexid = 0;
    }
}
void teatime_delete_program(teatime_t *obj)
{
    /* Detaches and deletes the shader and program (whichever exist) and
       clears both handles.  NULL-safe. */
    if (obj == NULL)
        return;
    if (obj->shader > 0 && obj->program > 0)
        glDetachShader(obj->program, obj->shader);
    if (obj->shader > 0)
        glDeleteShader(obj->shader);
    if (obj->program > 0)
        glDeleteProgram(obj->program);
    obj->shader = 0;
    obj->program = 0;
}
void teatime_print_version(FILE *fp)
{
const GLubyte *version = NULL;
version = glGetString(GL_VERSION);
fprintf(fp, "GL Version: %s\n", (const char *)version);
version = glGetString(GL_SHADING_LANGUAGE_VERSION);
fprintf(fp, "GLSL Version: %s\n", (const char *)version);
version = glGetString(GL_VENDOR);
fprintf(fp, "GL Vendor: %s\n", (const char *)version);
}
/* Parses the GL_VERSION string ("major.minor...") and stores the two
 * numbers into *major / *minor (each may be NULL).  Returns 0 on success,
 * -1 when the string is unavailable or unparsable.
 *
 * Bug fix: the original read &endp[1] without first checking that the
 * character after the major number is a '.'.  For a version string with no
 * dot (e.g. "3"), endp points at the terminating NUL and &endp[1] indexes
 * one past the end of the string - an out-of-bounds read. */
int teatime_check_gl_version(uint32_t *major, uint32_t *minor)
{
    const GLubyte *version = glGetString(GL_VERSION);
    if (version == NULL)
        return -1;
    char *endp = NULL;
    errno = 0;
    long major_value = strtol((const char *)version, &endp, 10);
    if (errno == ERANGE || (const void *)endp == (const void *)version) {
        fprintf(stderr, "Version string %s cannot be parsed\n", (const char *)version);
        return -1;
    }
    /* the minor number must be introduced by a '.' */
    if (*endp != '.') {
        fprintf(stderr, "Version string %s cannot be parsed\n", (const char *)version);
        return -1;
    }
    char *endp2 = NULL;
    errno = 0;
    long minor_value = strtol(endp + 1, &endp2, 10);
    if (errno == ERANGE || endp2 == endp + 1) {
        fprintf(stderr, "Version string %s cannot be parsed\n", (const char *)version);
        return -1;
    }
    if (major)
        *major = (uint32_t)major_value;
    if (minor)
        *minor = (uint32_t)minor_value;
    return 0;
}
int teatime_check_gl_errors(int line, const char *fn_name)
{
GLenum err = glGetError();
if (err != GL_NO_ERROR) {
const GLubyte *estr = gluErrorString(err);
fprintf(stderr, "%s(): GL Error(%d) on line %d: %s\n", fn_name,
err, line, (const char *)estr);
return -1;
}
return 0;
}
int teatime_check_gl_fb_errors(int line, const char *fn_name)
{
GLenum st = (GLenum)glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
switch (st) {
case GL_FRAMEBUFFER_COMPLETE_EXT:
return 0;
case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: incomplete attachment\n", fn_name, line);
break;
case GL_FRAMEBUFFER_UNSUPPORTED_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: unsupported\n", fn_name, line);
break;
case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: incomplete missing attachment\n", fn_name, line);
break;
case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: incomplete dimensions\n", fn_name, line);
break;
case GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: incomplete formats\n", fn_name, line);
break;
case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: incomplete draw buffer\n", fn_name, line);
break;
case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT:
fprintf(stderr, "%s(): GL FB error on line %d: incomplete read buffer\n", fn_name, line);
break;
default:
fprintf(stderr, "%s(): GL FB error on line %d: Unknown. Error Value: %d\n", fn_name, line, st);
break;
}
return -1;
}
int teatime_check_program_errors(GLuint program)
{
    /* Dumps the program's info log (link warnings/errors) to stderr when
       one exists.  Returns 0 whether or not a log was printed; -ENOMEM
       only when the log buffer cannot be allocated. */
    GLint log_length = 0;
    glGetProgramiv(program, GL_INFO_LOG_LENGTH, &log_length);
    if (log_length <= 1)
        return 0;
    GLchar *log = calloc(log_length, sizeof(GLchar));
    if (log == NULL) {
        fprintf(stderr, "Out of memory allocating %d bytes\n", log_length);
        return -ENOMEM;
    }
    GLsizei written = 0;
    glGetProgramInfoLog(program, log_length, &written, log);
    log[written] = '\0';
    fprintf(stderr, "Program Errors:\n%s\n",
            (const char *)log);
    free(log);
    return 0;
}
int teatime_check_shader_errors(GLuint shader)
{
    /* Dumps the shader's info log (compile warnings/errors) to stderr when
       one exists.  Returns 0 whether or not a log was printed; -ENOMEM
       only when the log buffer cannot be allocated. */
    GLint log_length = 0;
    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);
    if (log_length <= 1)
        return 0;
    GLchar *log = calloc(log_length, sizeof(GLchar));
    if (log == NULL) {
        fprintf(stderr, "Out of memory allocating %d bytes\n", log_length);
        return -ENOMEM;
    }
    GLsizei written = 0;
    glGetShaderInfoLog(shader, log_length, &written, log);
    log[written] = '\0';
    fprintf(stderr, "Shader Errors:\n%s\n",
            (const char *)log);
    free(log);
    return 0;
}
/* GLSL 1.30 fragment shader implementing TEA encryption.  Each RGBA texel
 * holds two 64-bit TEA blocks (x[0],x[1] and x[2],x[3]); `ikey` is the
 * 128-bit key and `rounds` the number of Feistel rounds.  0x9e3779b9 is
 * the standard TEA key schedule constant (2^32 / golden ratio). */
#define TEA_ENCRYPT_SOURCE \
"#version 130\n" \
"#extension GL_EXT_gpu_shader4 : enable\n" \
"uniform usampler2D idata;\n" \
"uniform uvec4 ikey; \n" \
"uniform uint rounds; \n" \
"out uvec4 odata; \n" \
"void main(void) {\n" \
" uvec4 x = texture(idata, gl_TexCoord[0].st);\n" \
" uint delta = uint(0x9e3779b9); \n" \
" uint sum = uint(0); \n" \
" for (uint i = uint(0); i < rounds; ++i) {\n" \
" sum += delta; \n" \
" x[0] += (((x[1] << 4) + ikey[0]) ^ (x[1] + sum)) ^ ((x[1] >> 5) + ikey[1]);\n" \
" x[1] += (((x[0] << 4) + ikey[2]) ^ (x[0] + sum)) ^ ((x[0] >> 5) + ikey[3]);\n" \
" x[2] += (((x[3] << 4) + ikey[0]) ^ (x[3] + sum)) ^ ((x[3] >> 5) + ikey[1]);\n" \
" x[3] += (((x[2] << 4) + ikey[2]) ^ (x[2] + sum)) ^ ((x[2] >> 5) + ikey[3]);\n" \
" }\n" \
" odata = x; \n" \
"}\n"
/* Inverse of TEA_ENCRYPT_SOURCE: runs the rounds backwards, starting from
 * sum = delta * rounds and undoing the half-block updates in reverse. */
#define TEA_DECRYPT_SOURCE \
"#version 130\n" \
"#extension GL_EXT_gpu_shader4 : enable\n" \
"uniform usampler2D idata;\n" \
"uniform uvec4 ikey; \n" \
"uniform uint rounds; \n" \
"out uvec4 odata; \n" \
"void main(void) {\n" \
" uvec4 x = texture(idata, gl_TexCoord[0].st);\n" \
" uint delta = uint(0x9e3779b9); \n" \
" uint sum = delta * rounds; \n" \
" for (uint i = uint(0); i < rounds; ++i) {\n" \
" x[1] -= (((x[0] << 4) + ikey[2]) ^ (x[0] + sum)) ^ ((x[0] >> 5) + ikey[3]);\n" \
" x[0] -= (((x[1] << 4) + ikey[0]) ^ (x[1] + sum)) ^ ((x[1] >> 5) + ikey[1]);\n" \
" x[3] -= (((x[2] << 4) + ikey[2]) ^ (x[2] + sum)) ^ ((x[2] >> 5) + ikey[3]);\n" \
" x[2] -= (((x[3] << 4) + ikey[0]) ^ (x[3] + sum)) ^ ((x[3] >> 5) + ikey[1]);\n" \
" sum -= delta; \n" \
" }\n" \
" odata = x; \n" \
"}\n"
/* Returns the (static) GLSL source for the TEA encryption shader. */
const char *teatime_encrypt_source()
{
    return TEA_ENCRYPT_SOURCE;
}
/* Returns the (static) GLSL source for the TEA decryption shader. */
const char *teatime_decrypt_source()
{
    return TEA_DECRYPT_SOURCE;
}
| stealthylabs/teatime | teatime.c | C | mit | 19,398 |
#pragma config(Sensor, in1, linefollower, sensorLineFollower)
#pragma config(Sensor, dgtl5, OutputBeltSonar, sensorSONAR_mm)
#pragma config(Motor, port6, WhipCreamMotor, tmotorVex393, openLoop)
#pragma config(Motor, port7, InputBeltMotor, tmotorServoContinuousRotation, openLoop)
#pragma config(Motor, port8, ElevatorMotor, tmotorServoContinuousRotation, openLoop)
#pragma config(Motor, port9, OutputBeltMotor, tmotorServoContinuousRotation, openLoop)
//*!!Code automatically generated by 'ROBOTC' configuration wizard !!*//
/*
Project Title: Cookie Maker
Team Members: Patrick Kubiak
Date:
Section:
Task Description: Control cookie maker machine
Pseudocode:
Move input conveior belt set distance
Move elevator set distance
Move output conveior belt until whip cream
Press whip cream
Reset whip cream
Move output conveior belt to end
Reset elevator
*/
task main()
{ //Program begins, insert code within curly braces
    /* Cookie maker control loop: feed a cookie in, lift it, carry it to the
       whip-cream press, press and retract, then carry it out.  All motion
       is open-loop (timed) except the line follower, which locates the
       cookie on the output belt.  Runs forever. */
    while (true)
    {
        //Input Conveior Belt
        startMotor(InputBeltMotor, 127);
        wait(2.5);
        stopMotor(InputBeltMotor);
        //Elevator
        startMotor(ElevatorMotor, 127);
        wait(1.5);
        stopMotor(ElevatorMotor);
        //Move Cookie To line follower
        // 2900 is the reflectivity threshold that marks the cookie's
        // arrival under the sensor.  NOTE(review): tuned empirically -
        // confirm against the actual sensor readings.
        do
        {
            startMotor(OutputBeltMotor, -127);
        }
        while(SensorValue(linefollower) > 2900);
        stopMotor(OutputBeltMotor);
        //Reset Elevator
        startMotor(ElevatorMotor, -127);
        wait(2);
        stopMotor(ElevatorMotor);
        //Move Cookie To Whip Cream
        startMotor(OutputBeltMotor, -127);
        wait(0.4);
        stopMotor(OutputBeltMotor);
        //Whip Cream Press
        startMotor(WhipCreamMotor, -127);
        wait(1);
        stopMotor(WhipCreamMotor);
        //Whip Cream Reset
        // Retract slightly less than the press time so the mechanism does
        // not drive past its starting position.
        startMotor(WhipCreamMotor, 127);
        wait(0.9);
        stopMotor(WhipCreamMotor);
        //Output Conveior Belt
        startMotor(OutputBeltMotor, -127);
        wait(2);
    }
}
| patkub/pltw-vex-robotc | CookieMaker_Sensor.c | C | mit | 1,901 |
/**
* Reverb for the OpenAL cross platform audio library
* Copyright (C) 2008-2009 by Christopher Fitzgerald.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <math.h>
#include <stdlib.h>
#include "AL/al.h"
#include "AL/alc.h"
#include "alMain.h"
#include "alAuxEffectSlot.h"
#include "alEffect.h"
#include "alError.h"
#include "alu.h"
typedef struct DelayLine
{
    // The delay lines use sample lengths that are powers of 2 to allow
    // bitmasking instead of modulus wrapping.
    ALuint Mask;    // line length - 1; AND-ed with offsets to wrap reads/writes
    ALfloat *Line;  // points into the state's shared SampleBuffer (not owned)
} DelayLine;
// Private per-instance state for the (EAX) reverb effect.
typedef struct ALverbState {
    // Must be first in all effects!
    ALeffectState state;
    // All delay lines are allocated as a single buffer to reduce memory
    // fragmentation and management code.
    ALfloat *SampleBuffer;
    // Master effect low-pass filter (2 chained 1-pole filters).
    FILTER LpFilter;
    ALfloat LpHistory[2];
    // Initial effect delay and decorrelation.
    DelayLine Delay;
    // The tap points for the initial delay. First tap goes to early
    // reflections, the last four decorrelate to late reverb.
    ALuint Tap[5];
    struct {
        // Total gain for early reflections.
        ALfloat Gain;
        // Early reflections are done with 4 delay lines.
        ALfloat Coeff[4];
        DelayLine Delay[4];
        ALuint Offset[4];
        // The gain for each output channel based on 3D panning.
        ALfloat PanGain[OUTPUTCHANNELS];
    } Early;
    struct {
        // Total gain for late reverb.
        ALfloat Gain;
        // Attenuation to compensate for modal density and decay rate.
        ALfloat DensityGain;
        // The feed-back and feed-forward all-pass coefficient.
        ALfloat ApFeedCoeff;
        // Mixing matrix coefficient.
        ALfloat MixCoeff;
        // Late reverb has 4 parallel all-pass filters.
        ALfloat ApCoeff[4];
        DelayLine ApDelay[4];
        ALuint ApOffset[4];
        // In addition to 4 cyclical delay lines.
        ALfloat Coeff[4];
        DelayLine Delay[4];
        ALuint Offset[4];
        // The cyclical delay lines are 1-pole low-pass filtered.
        ALfloat LpCoeff[4];
        ALfloat LpSample[4];
        // The gain for each output channel based on 3D panning.
        ALfloat PanGain[OUTPUTCHANNELS];
    } Late;
    // The current read offset for all delay lines.
    // Incremented once per processed sample (see ReverbInOut()).
    ALuint Offset;
} ALverbState;
// All delay line lengths are specified in seconds.
// The lengths of the early delay lines.
// (Each line is 3x the length of the previous one.)
static const ALfloat EARLY_LINE_LENGTH[4] =
{
    0.0015f, 0.0045f, 0.0135f, 0.0405f
};
// The lengths of the late all-pass delay lines.
static const ALfloat ALLPASS_LINE_LENGTH[4] =
{
    0.0151f, 0.0167f, 0.0183f, 0.0200f,
};
// The lengths of the late cyclical delay lines.
static const ALfloat LATE_LINE_LENGTH[4] =
{
    0.0211f, 0.0311f, 0.0461f, 0.0680f
};
// The late cyclical delay lines have a variable length dependent on the
// effect's density parameter (inverted for some reason) and this multiplier.
static const ALfloat LATE_LINE_MULTIPLIER = 4.0f;
// Input into the late reverb is decorrelated between four channels. Their
// timings are dependent on a fraction and multiplier. See VerbUpdate() for
// the calculations involved.
static const ALfloat DECO_FRACTION = 1.0f / 32.0f;
static const ALfloat DECO_MULTIPLIER = 2.0f;
// The maximum length of initial delay for the master delay line (a sum of
// the maximum early reflection and late reverb delays, 0.3s + 0.1s).
static const ALfloat MASTER_LINE_LENGTH = 0.3f + 0.1f;
// Round 'value' up to the next power of 2; a value that is already a power
// of 2 is returned unchanged. NextPowerOf2(0) yields 1.
static ALuint NextPowerOf2(ALuint value)
{
    ALuint result = 1;
    if(value == 0)
        return result;
    // Subtracting 1 first makes exact powers of 2 map to themselves.
    value -= 1;
    while(value != 0)
    {
        value >>= 1;
        result <<= 1;
    }
    return result;
}
// Basic delay line input/output routines.
// Read the sample at 'offset'; the power-of-2 mask wraps the index into range.
static __inline ALfloat DelayLineOut(DelayLine *Delay, ALuint offset)
{
    return Delay->Line[offset&Delay->Mask];
}
// Write sample 'in' at 'offset'; the power-of-2 mask wraps the index.
static __inline ALvoid DelayLineIn(DelayLine *Delay, ALuint offset, ALfloat in)
{
    Delay->Line[offset&Delay->Mask] = in;
}
// Delay line output routine for early reflections: taps early line 'index'
// at its fixed offset behind the master read position and applies that
// line's decay coefficient.
static __inline ALfloat EarlyDelayLineOut(ALverbState *State, ALuint index)
{
    return State->Early.Coeff[index] *
           DelayLineOut(&State->Early.Delay[index],
                        State->Offset - State->Early.Offset[index]);
}
// Given an input sample, this function produces four-channel output for the
// early reflections.
static __inline ALvoid EarlyReflection(ALverbState *State, ALfloat in, ALfloat *out)
{
    ALfloat tap[4], junction;
    ALuint i;
    // Obtain the decayed results of each early delay line.
    for(i = 0;i < 4;i++)
        tap[i] = EarlyDelayLineOut(State, i);
    /* The following uses a lossless scattering junction from waveguide
     * theory. It actually amounts to a householder mixing matrix, which
     * will produce a maximally diffuse response, and means this can probably
     * be considered a simple feedback delay network (FDN).
     *          N
     *         ---
     *         \
     * v = 2/N /   d_i
     *         ---
     *         i=1
     */
    junction = (tap[0] + tap[1] + tap[2] + tap[3]) * 0.5f;
    // The junction is loaded with the input here.
    junction += in;
    // For each line: compute the feed value, refeed the delay line, and emit
    // the junction result (scaled by the early gain) on the matching channel.
    for(i = 0;i < 4;i++)
    {
        ALfloat feed = junction - tap[i];
        DelayLineIn(&State->Early.Delay[i], State->Offset, feed);
        out[i] = State->Early.Gain * feed;
    }
}
// All-pass input/output routine for late reverb (Schroeder-style all-pass:
// feed-forward and feed-back share the same coefficient). Note the read
// from the line happens before the write at the current offset.
static __inline ALfloat LateAllPassInOut(ALverbState *State, ALuint index, ALfloat in)
{
    ALfloat out;
    out = State->Late.ApCoeff[index] *
          DelayLineOut(&State->Late.ApDelay[index],
                       State->Offset - State->Late.ApOffset[index]);
    out -= (State->Late.ApFeedCoeff * in);
    DelayLineIn(&State->Late.ApDelay[index], State->Offset,
                (State->Late.ApFeedCoeff * out) + in);
    return out;
}
// Delay line output routine for late reverb: taps cyclical line 'index' at
// its offset behind the master read position, applying its decay coefficient.
static __inline ALfloat LateDelayLineOut(ALverbState *State, ALuint index)
{
    return State->Late.Coeff[index] *
           DelayLineOut(&State->Late.Delay[index],
                        State->Offset - State->Late.Offset[index]);
}
// Low-pass filter input/output routine for late reverb. One-pole filter:
// the stored history sample is pulled toward the input, with LpCoeff
// controlling how much of the previous output is retained.
static __inline ALfloat LateLowPassInOut(ALverbState *State, ALuint index, ALfloat in)
{
    State->Late.LpSample[index] = in +
        ((State->Late.LpSample[index] - in) * State->Late.LpCoeff[index]);
    return State->Late.LpSample[index];
}
// Given four decorrelated input samples, this function produces four-channel
// output for the late reverb.
static __inline ALvoid LateReverb(ALverbState *State, ALfloat *in, ALfloat *out)
{
    ALfloat d[4], f[4];
    // Obtain the decayed results of the cyclical delay lines, and add the
    // corresponding input channels attenuated by density. Then pass the
    // results through the low-pass filters.
    d[0] = LateLowPassInOut(State, 0, (State->Late.DensityGain * in[0]) +
                                      LateDelayLineOut(State, 0));
    d[1] = LateLowPassInOut(State, 1, (State->Late.DensityGain * in[1]) +
                                      LateDelayLineOut(State, 1));
    d[2] = LateLowPassInOut(State, 2, (State->Late.DensityGain * in[2]) +
                                      LateDelayLineOut(State, 2));
    d[3] = LateLowPassInOut(State, 3, (State->Late.DensityGain * in[3]) +
                                      LateDelayLineOut(State, 3));
    // To help increase diffusion, run each line through an all-pass filter.
    // The order of the all-pass filters is selected so that the shortest
    // all-pass filter will feed the shortest delay line.
    d[0] = LateAllPassInOut(State, 1, d[0]);
    d[1] = LateAllPassInOut(State, 3, d[1]);
    d[2] = LateAllPassInOut(State, 0, d[2]);
    d[3] = LateAllPassInOut(State, 2, d[3]);
    /* Late reverb is done with a modified feedback delay network (FDN)
     * topology.  Four input lines are each fed through their own all-pass
     * filter and then into the mixing matrix.  The four outputs of the
     * mixing matrix are then cycled back to the inputs.  Each output feeds
     * a different input to form a circlular feed cycle.
     *
     * The mixing matrix used is a 4D skew-symmetric rotation matrix derived
     * using a single unitary rotational parameter:
     *
     *  [  d,  a,  b,  c ]          1 = a^2 + b^2 + c^2 + d^2
     *  [ -a,  d,  c, -b ]
     *  [ -b, -c,  d,  a ]
     *  [ -c,  b, -a,  d ]
     *
     * The rotation is constructed from the effect's diffusion parameter,
     * yielding:  1 = x^2 + 3 y^2; where a, b, and c are the coefficient y
     * with differing signs, and d is the coefficient x.  The matrix is thus:
     *
     *  [  x,  y, -y,  y ]          x = 1 - (0.5 diffusion^3)
     *  [ -y,  x,  y,  y ]          y = sqrt((1 - x^2) / 3)
     *  [  y, -y,  x,  y ]
     *  [ -y, -y, -y,  x ]
     *
     * To reduce the number of multiplies, the x coefficient is applied with
     * the cyclical delay line coefficients.  Thus only the y coefficient is
     * applied when mixing, and is modified to be:  y / x.
     */
    f[0] = d[0] + (State->Late.MixCoeff * ( d[1] - d[2] + d[3]));
    f[1] = d[1] + (State->Late.MixCoeff * (-d[0] + d[2] + d[3]));
    f[2] = d[2] + (State->Late.MixCoeff * ( d[0] - d[1] + d[3]));
    f[3] = d[3] + (State->Late.MixCoeff * (-d[0] - d[1] - d[2]));
    // Output the results of the matrix for all four cyclical delay lines,
    // attenuated by the late reverb gain (which is attenuated by the 'x'
    // mix coefficient).
    out[0] = State->Late.Gain * f[0];
    out[1] = State->Late.Gain * f[1];
    out[2] = State->Late.Gain * f[2];
    out[3] = State->Late.Gain * f[3];
    // The delay lines are fed circularly in the order:
    // 0 -> 1 -> 3 -> 2 -> 0 ...
    // (line 0 receives matrix output 2, line 1 output 0, etc.)
    DelayLineIn(&State->Late.Delay[0], State->Offset, f[2]);
    DelayLineIn(&State->Late.Delay[1], State->Offset, f[0]);
    DelayLineIn(&State->Late.Delay[2], State->Offset, f[3]);
    DelayLineIn(&State->Late.Delay[3], State->Offset, f[1]);
}
// Process the reverb for a given input sample, resulting in separate four-
// channel output for both early reflections and late reverb.
// Also advances the master read/write offset by one sample.
static __inline ALvoid ReverbInOut(ALverbState *State, ALfloat in, ALfloat *early, ALfloat *late)
{
    ALfloat taps[4];
    // Low-pass filter the incoming sample.
    in = lpFilter2P(&State->LpFilter, 0, in);
    // Feed the initial delay line.
    DelayLineIn(&State->Delay, State->Offset, in);
    // Calculate the early reflection from the first delay tap.
    in = DelayLineOut(&State->Delay, State->Offset - State->Tap[0]);
    EarlyReflection(State, in, early);
    // Calculate the late reverb from the last four delay taps.
    taps[0] = DelayLineOut(&State->Delay, State->Offset - State->Tap[1]);
    taps[1] = DelayLineOut(&State->Delay, State->Offset - State->Tap[2]);
    taps[2] = DelayLineOut(&State->Delay, State->Offset - State->Tap[3]);
    taps[3] = DelayLineOut(&State->Delay, State->Offset - State->Tap[4]);
    LateReverb(State, taps, late);
    // Step all delays forward one sample.
    State->Offset++;
}
// This destroys the reverb state. It should be called only when the effect
// slot has a different (or no) effect loaded over the reverb effect.
ALvoid VerbDestroy(ALeffectState *effect)
{
    ALverbState *State = (ALverbState*)effect;
    if(!State)
        return;
    // Release the single shared delay-line buffer, then the state itself.
    free(State->SampleBuffer);
    State->SampleBuffer = NULL;
    free(State);
}
// NOTE: Temp, remove later.
// Maps a 2D direction (re, im) to an index into the panning LUT, using an
// octagonal approximation of atan2 over QUADRANT_NUM steps per quadrant.
// A zero-length vector maps to position 0.
static __inline ALint aluCart2LUTpos(ALfloat re, ALfloat im)
{
    ALint pos = 0;
    ALfloat denom = aluFabs(re) + aluFabs(im);
    if(denom > 0.0f)
        pos = (ALint)(QUADRANT_NUM*aluFabs(im) / denom + 0.5);
    // Reflect into the correct quadrant based on the signs of re and im.
    if(re < 0.0)
        pos = 2 * QUADRANT_NUM - pos;
    if(im < 0.0)
        pos = LUT_NUM - pos;
    return pos%LUT_NUM;
}
// This updates the reverb state. This is called any time the reverb effect
// is loaded into a slot. It recomputes every derived quantity (filter
// coefficients, delay taps, gains, mixing matrix and panning gains) from
// the effect parameters and the context's output frequency.
ALvoid VerbUpdate(ALeffectState *effect, ALCcontext *Context, ALeffect *Effect)
{
    ALverbState *State = (ALverbState*)effect;
    ALuint index;
    ALfloat length, mixCoeff, cw, g, coeff;
    ALfloat hfRatio = Effect->Reverb.DecayHFRatio;
    // Calculate the master low-pass filter (from the master effect HF gain).
    cw = cos(2.0 * M_PI * Effect->Reverb.HFReference / Context->Frequency);
    g = __max(Effect->Reverb.GainHF, 0.0001f);
    State->LpFilter.coeff = 0.0f;
    if(g < 0.9999f) // 1-epsilon
        State->LpFilter.coeff = (1 - g*cw - aluSqrt(2*g*(1-cw) - g*g*(1 - cw*cw))) / (1 - g);
    // Calculate the initial delay taps.
    length = Effect->Reverb.ReflectionsDelay;
    State->Tap[0] = (ALuint)(length * Context->Frequency);
    length += Effect->Reverb.LateReverbDelay;
    /* The four inputs to the late reverb are decorrelated to smooth the
     * initial reverb and reduce harsh echos.  The timings are calculated as
     * multiples of a fraction of the smallest cyclical delay time. This
     * result is then adjusted so that the first tap occurs immediately (all
     * taps are reduced by the shortest fraction).
     *
     * offset[index] = ((FRACTION MULTIPLIER^index) - 1) delay
     */
    for(index = 0;index < 4;index++)
    {
        length += LATE_LINE_LENGTH[0] *
            (1.0f + (Effect->Reverb.Density * LATE_LINE_MULTIPLIER)) *
            (DECO_FRACTION * (pow(DECO_MULTIPLIER, (ALfloat)index) - 1.0f));
        State->Tap[1 + index] = (ALuint)(length * Context->Frequency);
    }
    // Calculate the early reflections gain (from the master effect gain, and
    // reflections gain parameters).
    State->Early.Gain = Effect->Reverb.Gain * Effect->Reverb.ReflectionsGain;
    // Calculate the gain (coefficient) for each early delay line.
    // (-60/20 converts the -60 dB decay target to a linear amplitude ratio.)
    for(index = 0;index < 4;index++)
        State->Early.Coeff[index] = pow(10.0f, EARLY_LINE_LENGTH[index] /
                                               Effect->Reverb.LateReverbDelay *
                                               -60.0f / 20.0f);
    // Calculate the first mixing matrix coefficient (x).
    mixCoeff = 1.0f - (0.5f * pow(Effect->Reverb.Diffusion, 3.0f));
    // Calculate the late reverb gain (from the master effect gain, and late
    // reverb gain parameters).  Since the output is tapped prior to the
    // application of the delay line coefficients, this gain needs to be
    // attenuated by the 'x' mix coefficient from above.
    State->Late.Gain = Effect->Reverb.Gain * Effect->Reverb.LateReverbGain * mixCoeff;
    /* To compensate for changes in modal density and decay time of the late
     * reverb signal, the input is attenuated based on the maximal energy of
     * the outgoing signal.  This is calculated as the ratio between a
     * reference value and the current approximation of energy for the output
     * signal.
     *
     * Reverb output matches exponential decay of the form Sum(a^n), where a
     * is the attenuation coefficient, and n is the sample ranging from 0 to
     * infinity.  The signal energy can thus be approximated using the area
     * under this curve, calculated as:  1 / (1 - a).
     *
     * The reference energy is calculated from a signal at the lowest (effect
     * at 1.0) density with a decay time of one second.
     *
     * The coefficient is calculated as the average length of the cyclical
     * delay lines.  This produces a better result than calculating the gain
     * for each line individually (most likely a side effect of diffusion).
     *
     * The final result is the square root of the ratio bound to a maximum
     * value of 1 (no amplification).
     */
    length = (LATE_LINE_LENGTH[0] + LATE_LINE_LENGTH[1] +
              LATE_LINE_LENGTH[2] + LATE_LINE_LENGTH[3]);
    g = length * (1.0f + LATE_LINE_MULTIPLIER) * 0.25f;
    g = pow(10.0f, g * -60.0f / 20.0f);
    g = 1.0f / (1.0f - (g * g));
    length *= 1.0f + (Effect->Reverb.Density * LATE_LINE_MULTIPLIER) * 0.25f;
    length = pow(10.0f, length / Effect->Reverb.DecayTime * -60.0f / 20.0f);
    length = 1.0f / (1.0f - (length * length));
    State->Late.DensityGain = __min(aluSqrt(g / length), 1.0f);
    // Calculate the all-pass feed-back and feed-forward coefficient.
    State->Late.ApFeedCoeff = 0.6f * pow(Effect->Reverb.Diffusion, 3.0f);
    // Calculate the mixing matrix coefficient (y / x).
    g = aluSqrt((1.0f - (mixCoeff * mixCoeff)) / 3.0f);
    State->Late.MixCoeff = g / mixCoeff;
    for(index = 0;index < 4;index++)
    {
        // Calculate the gain (coefficient) for each all-pass line.
        State->Late.ApCoeff[index] = pow(10.0f, ALLPASS_LINE_LENGTH[index] /
                                                Effect->Reverb.DecayTime *
                                                -60.0f / 20.0f);
    }
    // If the HF limit parameter is flagged, calculate an appropriate limit
    // based on the air absorption parameter.
    if(Effect->Reverb.DecayHFLimit && Effect->Reverb.AirAbsorptionGainHF < 1.0f)
    {
        ALfloat limitRatio;
        // For each of the cyclical delays, find the attenuation due to air
        // absorption in dB (converting delay time to meters using the speed
        // of sound).  Then reversing the decay equation, solve for HF ratio.
        // The delay length is cancelled out of the equation, so it can be
        // calculated once for all lines.
        limitRatio = 1.0f / (log10(Effect->Reverb.AirAbsorptionGainHF) *
                             SPEEDOFSOUNDMETRESPERSEC *
                             Effect->Reverb.DecayTime / -60.0f * 20.0f);
        // Need to limit the result to a minimum of 0.1, just like the HF
        // ratio parameter.
        limitRatio = __max(limitRatio, 0.1f);
        // Using the limit calculated above, apply the upper bound to the
        // HF ratio.
        hfRatio = __min(hfRatio, limitRatio);
    }
    // Calculate the low-pass filter frequency.
    cw = cos(2.0f * M_PI * Effect->Reverb.HFReference / Context->Frequency);
    for(index = 0;index < 4;index++)
    {
        // Calculate the length (in seconds) of each cyclical delay line.
        length = LATE_LINE_LENGTH[index] * (1.0f + (Effect->Reverb.Density *
                                                    LATE_LINE_MULTIPLIER));
        // Calculate the delay offset for the cyclical delay lines.
        State->Late.Offset[index] = (ALuint)(length * Context->Frequency);
        // Calculate the gain (coefficient) for each cyclical line.
        State->Late.Coeff[index] = pow(10.0f, length / Effect->Reverb.DecayTime *
                                              -60.0f / 20.0f);
        // Eventually this should boost the high frequencies when the ratio
        // exceeds 1.
        coeff = 0.0f;
        if (hfRatio < 1.0f)
        {
            // Calculate the decay equation for each low-pass filter.
            g = pow(10.0f, length / (Effect->Reverb.DecayTime * hfRatio) *
                           -60.0f / 20.0f) / State->Late.Coeff[index];
            g  = __max(g, 0.1f);
            g *= g;
            // Calculate the gain (coefficient) for each low-pass filter.
            if(g < 0.9999f) // 1-epsilon
                coeff = (1 - g*cw - aluSqrt(2*g*(1-cw) - g*g*(1 - cw*cw))) / (1 - g);
            // Very low decay times will produce minimal output, so apply an
            // upper bound to the coefficient.
            coeff = __min(coeff, 0.98f);
        }
        State->Late.LpCoeff[index] = coeff;
        // Attenuate the cyclical line coefficients by the mixing coefficient
        // (x).
        State->Late.Coeff[index] *= mixCoeff;
    }
    // Calculate the 3D-panning gains for the early reflections and late
    // reverb (for EAX mode).
    {
        ALfloat earlyPan[3] = { Effect->Reverb.ReflectionsPan[0], Effect->Reverb.ReflectionsPan[1], Effect->Reverb.ReflectionsPan[2] };
        ALfloat latePan[3] = { Effect->Reverb.LateReverbPan[0], Effect->Reverb.LateReverbPan[1], Effect->Reverb.LateReverbPan[2] };
        ALfloat *speakerGain, dirGain, ambientGain;
        ALfloat length;
        ALint pos;
        // Normalize the panning vectors if they exceed unit length.
        length = earlyPan[0]*earlyPan[0] + earlyPan[1]*earlyPan[1] + earlyPan[2]*earlyPan[2];
        if(length > 1.0f)
        {
            length = 1.0f / aluSqrt(length);
            earlyPan[0] *= length;
            earlyPan[1] *= length;
            earlyPan[2] *= length;
        }
        length = latePan[0]*latePan[0] + latePan[1]*latePan[1] + latePan[2]*latePan[2];
        if(length > 1.0f)
        {
            length = 1.0f / aluSqrt(length);
            latePan[0] *= length;
            latePan[1] *= length;
            latePan[2] *= length;
        }
        // This code applies directional reverb just like the mixer applies
        // directional sources.  It diffuses the sound toward all speakers
        // as the magnitude of the panning vector drops, which is only an
        // approximation of the expansion of sound across the speakers from
        // the panning direction.
        pos = aluCart2LUTpos(earlyPan[2], earlyPan[0]);
        speakerGain = &Context->PanningLUT[OUTPUTCHANNELS * pos];
        dirGain = aluSqrt((earlyPan[0] * earlyPan[0]) + (earlyPan[2] * earlyPan[2]));
        ambientGain = (1.0 - dirGain);
        for(index = 0;index < OUTPUTCHANNELS;index++)
            State->Early.PanGain[index] = dirGain * speakerGain[index] + ambientGain;
        pos = aluCart2LUTpos(latePan[2], latePan[0]);
        speakerGain = &Context->PanningLUT[OUTPUTCHANNELS * pos];
        dirGain = aluSqrt((latePan[0] * latePan[0]) + (latePan[2] * latePan[2]));
        ambientGain = (1.0 - dirGain);
        for(index = 0;index < OUTPUTCHANNELS;index++)
            State->Late.PanGain[index] = dirGain * speakerGain[index] + ambientGain;
    }
}
// This processes the reverb state, given the input samples and an output
// buffer.  Output is accumulated (+=) into the buffer, mixing the reverb on
// top of whatever is already there.
ALvoid VerbProcess(ALeffectState *effect, const ALeffectslot *Slot, ALuint SamplesToDo, const ALfloat *SamplesIn, ALfloat (*SamplesOut)[OUTPUTCHANNELS])
{
    ALverbState *State = (ALverbState*)effect;
    ALuint index;
    ALfloat early[4], late[4], out[4];
    ALfloat gain = Slot->Gain;
    for(index = 0;index < SamplesToDo;index++)
    {
        // Process reverb for this sample.
        ReverbInOut(State, SamplesIn[index], early, late);
        // Mix early reflections and late reverb.
        out[0] = (early[0] + late[0]) * gain;
        out[1] = (early[1] + late[1]) * gain;
        out[2] = (early[2] + late[2]) * gain;
        out[3] = (early[3] + late[3]) * gain;
        // Output the results.  Channel assignment (0=left, 1=right,
        // 3=front center, 2=back center) matches EAXVerbProcess below.
        SamplesOut[index][FRONT_LEFT]   += out[0];
        SamplesOut[index][FRONT_RIGHT]  += out[1];
        SamplesOut[index][FRONT_CENTER] += out[3];
        SamplesOut[index][SIDE_LEFT]    += out[0];
        SamplesOut[index][SIDE_RIGHT]   += out[1];
        SamplesOut[index][BACK_LEFT]    += out[0];
        SamplesOut[index][BACK_RIGHT]   += out[1];
        SamplesOut[index][BACK_CENTER]  += out[2];
    }
}
// This processes the EAX reverb state, given the input samples and an output
// buffer.  Like VerbProcess, but applies the per-channel 3D panning gains
// computed by VerbUpdate.  Output is accumulated (+=) into the buffer.
ALvoid EAXVerbProcess(ALeffectState *effect, const ALeffectslot *Slot, ALuint SamplesToDo, const ALfloat *SamplesIn, ALfloat (*SamplesOut)[OUTPUTCHANNELS])
{
    ALverbState *State = (ALverbState*)effect;
    ALuint index;
    ALfloat early[4], late[4];
    ALfloat gain = Slot->Gain;
    for(index = 0;index < SamplesToDo;index++)
    {
        // Process reverb for this sample.
        ReverbInOut(State, SamplesIn[index], early, late);
        // Unfortunately, while the number and configuration of gains for
        // panning adjust according to OUTPUTCHANNELS, the output from the
        // reverb engine is not so scalable.
        SamplesOut[index][FRONT_LEFT] +=
           (State->Early.PanGain[FRONT_LEFT]*early[0] +
            State->Late.PanGain[FRONT_LEFT]*late[0]) * gain;
        SamplesOut[index][FRONT_RIGHT] +=
           (State->Early.PanGain[FRONT_RIGHT]*early[1] +
            State->Late.PanGain[FRONT_RIGHT]*late[1]) * gain;
        SamplesOut[index][FRONT_CENTER] +=
           (State->Early.PanGain[FRONT_CENTER]*early[3] +
            State->Late.PanGain[FRONT_CENTER]*late[3]) * gain;
        SamplesOut[index][SIDE_LEFT] +=
           (State->Early.PanGain[SIDE_LEFT]*early[0] +
            State->Late.PanGain[SIDE_LEFT]*late[0]) * gain;
        SamplesOut[index][SIDE_RIGHT] +=
           (State->Early.PanGain[SIDE_RIGHT]*early[1] +
            State->Late.PanGain[SIDE_RIGHT]*late[1]) * gain;
        SamplesOut[index][BACK_LEFT] +=
           (State->Early.PanGain[BACK_LEFT]*early[0] +
            State->Late.PanGain[BACK_LEFT]*late[0]) * gain;
        SamplesOut[index][BACK_RIGHT] +=
           (State->Early.PanGain[BACK_RIGHT]*early[1] +
            State->Late.PanGain[BACK_RIGHT]*late[1]) * gain;
        SamplesOut[index][BACK_CENTER] +=
           (State->Early.PanGain[BACK_CENTER]*early[2] +
            State->Late.PanGain[BACK_CENTER]*late[2]) * gain;
    }
}
// This creates the reverb state. It should be called only when the reverb
// effect is loaded into a slot that doesn't already have a reverb effect.
// Layout of length[13]: [0] master delay, [1..4] early lines,
// [5..8] late all-pass lines, [9..12] late cyclical lines.
ALeffectState *VerbCreate(ALCcontext *Context)
{
    ALverbState *State = NULL;
    ALuint samples, length[13], totalLength, index;
    State = malloc(sizeof(ALverbState));
    if(!State)
    {
        alSetError(AL_OUT_OF_MEMORY);
        return NULL;
    }
    State->state.Destroy = VerbDestroy;
    State->state.Update = VerbUpdate;
    State->state.Process = VerbProcess;
    // All line lengths are powers of 2, calculated from their lengths, with
    // an additional sample in case of rounding errors.
    // See VerbUpdate() for an explanation of the additional calculation
    // added to the master line length.
    samples = (ALuint)
        ((MASTER_LINE_LENGTH +
          (LATE_LINE_LENGTH[0] * (1.0f + LATE_LINE_MULTIPLIER) *
           (DECO_FRACTION * ((DECO_MULTIPLIER * DECO_MULTIPLIER *
                              DECO_MULTIPLIER) - 1.0f)))) *
         Context->Frequency) + 1;
    length[0] = NextPowerOf2(samples);
    totalLength = length[0];
    for(index = 0;index < 4;index++)
    {
        samples = (ALuint)(EARLY_LINE_LENGTH[index] * Context->Frequency) + 1;
        length[1 + index] = NextPowerOf2(samples);
        totalLength += length[1 + index];
    }
    for(index = 0;index < 4;index++)
    {
        samples = (ALuint)(ALLPASS_LINE_LENGTH[index] * Context->Frequency) + 1;
        length[5 + index] = NextPowerOf2(samples);
        totalLength += length[5 + index];
    }
    for(index = 0;index < 4;index++)
    {
        samples = (ALuint)(LATE_LINE_LENGTH[index] *
                           (1.0f + LATE_LINE_MULTIPLIER) * Context->Frequency) + 1;
        length[9 + index] = NextPowerOf2(samples);
        totalLength += length[9 + index];
    }
    // All lines share a single sample buffer and have their masks and start
    // addresses calculated once.
    State->SampleBuffer = malloc(totalLength * sizeof(ALfloat));
    if(!State->SampleBuffer)
    {
        free(State);
        alSetError(AL_OUT_OF_MEMORY);
        return NULL;
    }
    for(index = 0; index < totalLength;index++)
        State->SampleBuffer[index] = 0.0f;
    State->LpFilter.coeff = 0.0f;
    State->LpFilter.history[0] = 0.0f;
    State->LpFilter.history[1] = 0.0f;
    State->Delay.Mask = length[0] - 1;
    State->Delay.Line = &State->SampleBuffer[0];
    // totalLength is reused below as a running offset into SampleBuffer.
    totalLength = length[0];
    State->Tap[0] = 0;
    State->Tap[1] = 0;
    State->Tap[2] = 0;
    State->Tap[3] = 0;
    State->Tap[4] = 0;
    State->Early.Gain = 0.0f;
    for(index = 0;index < 4;index++)
    {
        State->Early.Coeff[index] = 0.0f;
        State->Early.Delay[index].Mask = length[1 + index] - 1;
        State->Early.Delay[index].Line = &State->SampleBuffer[totalLength];
        totalLength += length[1 + index];
        // The early delay lines have their read offsets calculated once.
        State->Early.Offset[index] = (ALuint)(EARLY_LINE_LENGTH[index] *
                                              Context->Frequency);
    }
    State->Late.Gain = 0.0f;
    State->Late.DensityGain = 0.0f;
    State->Late.ApFeedCoeff = 0.0f;
    State->Late.MixCoeff = 0.0f;
    for(index = 0;index < 4;index++)
    {
        State->Late.ApCoeff[index] = 0.0f;
        State->Late.ApDelay[index].Mask = length[5 + index] - 1;
        State->Late.ApDelay[index].Line = &State->SampleBuffer[totalLength];
        totalLength += length[5 + index];
        // The late all-pass lines have their read offsets calculated once.
        State->Late.ApOffset[index] = (ALuint)(ALLPASS_LINE_LENGTH[index] *
                                               Context->Frequency);
    }
    for(index = 0;index < 4;index++)
    {
        State->Late.Coeff[index] = 0.0f;
        State->Late.Delay[index].Mask = length[9 + index] - 1;
        State->Late.Delay[index].Line = &State->SampleBuffer[totalLength];
        totalLength += length[9 + index];
        // Cyclical line offsets depend on density; VerbUpdate() sets them.
        State->Late.Offset[index] = 0;
        State->Late.LpCoeff[index] = 0.0f;
        State->Late.LpSample[index] = 0.0f;
    }
    // Panning is applied as an independent gain for each output channel.
    for(index = 0;index < OUTPUTCHANNELS;index++)
    {
        State->Early.PanGain[index] = 0.0f;
        State->Late.PanGain[index] = 0.0f;
    }
    State->Offset = 0;
    return &State->state;
}
// Create an EAX-flavoured reverb: same state as the standard reverb, but
// processed with the 3D-panned EAXVerbProcess routine.
ALeffectState *EAXVerbCreate(ALCcontext *Context)
{
    ALeffectState *state = VerbCreate(Context);
    if(state != NULL)
        state->Process = EAXVerbProcess;
    return state;
}
| ghoulsblade/vegaogre | lugre/baselib/openal-soft-1.8.466/Alc/alcReverb.c | C | mit | 31,015 |
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include <sys/epoll.h>
#include "reactor.h"
struct state
{
  reactor_handler input;   /* handler for fd 0 (stdin) readability events */
  reactor_handler output;  /* handler for fd 1 (stdout) writability events */
  char buffer[4096];       /* staging buffer filled by one read() */
  data remaining;          /* the not-yet-written slice of buffer */
};
/* Refill state->buffer from stdin (fd 0).
 * Returns 0 when data was read and staged in state->remaining,
 * -1 on EOF (both handlers are removed, ending the loop) or when the
 * read would block (EAGAIN). Any other read failure trips the assert. */
int fill(struct state *state)
{
  ssize_t n;
  n = read(0, state->buffer, sizeof state->buffer);
  if (n == 0)
  {
    /* EOF: drop both handlers so the event loop can terminate. */
    reactor_delete(&state->input, 0);
    reactor_delete(&state->output, 1);
    return -1;
  }
  if (n == -1 && errno == EAGAIN)
    return -1;
  assert(n > 0);
  state->remaining = data_construct(state->buffer, n);
  /* Data pending: stop watching stdin, watch stdout (edge-triggered). */
  reactor_modify(&state->input, 0, 0);
  reactor_modify(&state->output, 1, EPOLLOUT | EPOLLET);
  return 0;
}
/* Write as much of state->remaining as stdout (fd 1) accepts.
 * Returns 0 on progress (possibly partial), -1 when the write would block
 * (EAGAIN). Any other write failure trips the assert. When the buffer is
 * fully drained, interest flips back to reading stdin. */
int flush(struct state *state)
{
  ssize_t n;
  n = write(1, data_base(state->remaining), data_size(state->remaining));
  if (n == -1 && errno == EAGAIN)
    return -1;
  assert(n > 0);
  /* Keep only the unwritten tail. */
  state->remaining = data_select(state->remaining, n, data_size(state->remaining) - n);
  if (!data_size(state->remaining))
  {
    /* Drained: watch stdin again (edge-triggered), stop watching stdout. */
    reactor_modify(&state->input, 0, EPOLLIN | EPOLLET);
    reactor_modify(&state->output, 1, 0);
  }
  return 0;
}
/* stdin readiness callback: keep pumping read->write cycles until either
 * side would block or EOF is reached, as long as nothing is buffered. */
void input(reactor_event *event)
{
  struct state *state = event->state;

  while (!data_size(state->remaining))
  {
    if (fill(state) == -1)
      break;
    if (flush(state) == -1)
      break;
  }
}
/* stdout readiness callback: drain the buffered data until it is gone or
 * the write would block. */
void output(reactor_event *event)
{
  struct state *state = event->state;

  while (data_size(state->remaining) && flush(state) == 0)
    ;
}
/* Non-blocking stdin->stdout pump driven by the reactor event loop. */
int main()
{
  struct state state = {0};

  /* Make stdin/stdout non-blocking. Read the current file-status flags
   * first: a bare F_SETFL with only O_NONBLOCK would clobber any flags
   * (e.g. O_APPEND) already set on the descriptors. */
  fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
  fcntl(1, F_SETFL, fcntl(1, F_GETFL) | O_NONBLOCK);

  reactor_construct();
  reactor_handler_construct(&state.input, input, &state);
  reactor_handler_construct(&state.output, output, &state);
  reactor_add(&state.input, 0, EPOLLIN);
  reactor_add(&state.output, 1, EPOLLOUT);
  /* Runs until fill() removes both handlers on EOF. */
  reactor_loop();
  reactor_destruct();
}
| fredrikwidlund/libreactor | example/fd.c | C | mit | 1,952 |
/*
* Search first occurence of a particular string in a given text [Finite Automata]
* Author: Progyan Bhattacharya <progyanb@acm.org>
* Repo: Design-And-Analysis-of-Algorithm [MIT LICENSE]
*/
#include "Search.h"
/*
 * Compute the automaton transition: the state reached after reading 'symbol'
 * while in 'state', for the matching automaton of 'pattern' (m is the table
 * dimension, pattern length + 1 as passed by GenerateTable). The returned
 * state is the length of the longest pattern prefix that is a suffix of the
 * text consumed so far.
 *
 * BUG FIX: the previous version computed prev = state - 1 once, outside the
 * candidate loop, and never updated it as 'next' decreased; it therefore
 * tested pattern[state-1] for every candidate and compared the wrong prefix
 * length (e.g. pattern "ABAB", state 3, symbol 'A' yielded 2 instead of 1).
 */
static int NextState(int m, char* pattern, int state, int symbol) {
    /* Fast path: the symbol simply extends the current match. */
    if (state < m && pattern[state] == symbol) {
        return state + 1;
    }
    /* Otherwise try successively shorter candidate states: 'next' is valid
     * when pattern[0..next-2] followed by 'symbol' is a suffix of the
     * consumed text (pattern[0..state-1] plus 'symbol'). */
    for (int next = state; next > 0; next--) {
        if (pattern[next - 1] == symbol) {
            int i;
            for (i = 0; i < next - 1; i++) {
                if (pattern[i] != pattern[state - next + 1 + i]) {
                    break;
                }
            }
            if (i == next - 1) {
                return next;
            }
        }
    }
    return 0;
}
/* Precompute the full transition table: for every automaton state and every
 * possible input symbol, store the next state. */
static void GenerateTable(int m, char* pattern, int Table[m][CHAR_MAX]) {
    for (int state = 0; state < m; state++) {
        for (int symbol = 0; symbol < CHAR_MAX; symbol++) {
            Table[state][symbol] = NextState(m, pattern, state, symbol);
        }
    }
}
/* Find the first occurrence of needle (length m) in haystack (length n)
 * by running the precomputed matching automaton over the text.
 * Returns the 0-based start index of the match, or -1 if absent. */
int Search(int n, char* haystack, int m, char* needle) {
    int Table[m + 1][CHAR_MAX];
    int state = 0;

    GenerateTable(m + 1, needle, Table);
    for (int pos = 0; pos < n; pos++) {
        state = Table[state][haystack[pos]];
        if (state == m) {
            /* Reached the accepting state: match ends at pos. */
            return pos - m + 1;
        }
    }
    return -1;
}
| Progyan1997/Design-and-Analysis-of-Algorithm | String Search/Finite Automata/Search.c | C | mit | 1,307 |
/****************************************************************
Copyright (C) 2014 All rights reserved.
> File Name: < echo_server.c >
> Author: < Sean Guo >
> Mail: < iseanxp+code@gmail.com >
> Created Time: < 2014/06/19 >
> Last Changed: < 2015/11/30 >
> Description: echo server for ARM
//{{{
int bind(int sockfd, struct sockaddr * my_addr, int addrlen);
bind()用来设置给参数sockfd 的socket 一个名称.
此名称由参数my_addr 指向一个sockaddr 结构, 对于不同的socket domain 定义了一个通用的数据结构
struct sockaddr
{
unsigned short int sa_family;
char sa_data[14];
};
1、sa_family 为调用socket()时的domain 参数, 即AF_xxxx 值.
2、sa_data 最多使用14 个字符长度.
此sockaddr 结构会因使用不同的socket domain 而有不同结构定义,
例如使用AF_INET domain,其socketaddr 结构定义便为
struct socketaddr_in
{
unsigned short int sin_family;
uint16_t sin_port;
struct in_addr sin_addr;
unsigned char sin_zero[8];
};
struct in_addr
{
uint32_t s_addr;
};
1、sin_family 即为sa_family
2、sin_port 为使用的port 编号
3、sin_addr. s_addr 为IP 地址 sin_zero 未使用.
参数 addrlen 为sockaddr 的结构长度.
返回值:成功则返回0, 失败返回-1, 错误原因存于errno 中.
错误代码:
1、EBADF 参数sockfd 非合法socket 处理代码.
2、EACCESS 权限不足
3、ENOTSOCK 参数sockfd 为一文件描述词, 非socket.
//}}}
Usage: ./echo_server
****************************************************************/
//{{{ include files
#include <stdio.h>
#include <sys/socket.h>
#include <sys/wait.h> // waitpid()
#include <stdlib.h> // exit();
#include <string.h> // bzero();
#include <netinet/in.h> // struct sockaddr_in;
#include <time.h> // time();
#include <arpa/inet.h> // inet_pton();
#include <unistd.h> // write();
#include <errno.h> // errno
#include <signal.h> // SIGCHLD
//}}}
#define MAXLINE 4096 /* max text line length */
#define LISTENQ 1024 /* 2nd argument to listen() , 排队的最大连接数*/
#define LISTEN_PORT 9669 //服务器监听端口
//参数: 已连接的socket描述符.
//功能: echo此socket发送的一切数据;
//阻塞函数, 直到对方socket关闭.
void str_echo(int sockfd);
//信号处理函数, 将等待一个子进程的结束。
void sig_child(int signo);
/*
 * Concurrent TCP echo server: listens on LISTEN_PORT and forks one child
 * per accepted connection; each child echoes the client's data until the
 * client closes the connection.
 */
int main(int argc, char **argv)
//{{{
{
    int listenfd, connfd;
    struct sockaddr_in server_addr, client_addr;
    socklen_t addr_len;
    pid_t child_pid;

    /* Create the listening TCP socket. */
    listenfd = socket(AF_INET, SOCK_STREAM, 0);
    if (listenfd < 0) {
        perror("socket");
        exit(1);
    }

    /* Bind to all local interfaces on LISTEN_PORT. */
    bzero(&server_addr, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(LISTEN_PORT);
    if (bind(listenfd, (struct sockaddr *) &server_addr, sizeof(server_addr)) < 0) {
        perror("bind");
        exit(1);
    }

    /* Mark the socket passive; LISTENQ bounds the pending-connection queue. */
    if (listen(listenfd, LISTENQ) < 0) {
        perror("listen");
        exit(1);
    }

    /* Reap terminated children so they don't linger as zombies. */
    signal(SIGCHLD, sig_child);

    /* Accept loop: the parent keeps listening, each connection is served
     * by a forked child. */
    for ( ; ; ) {
        addr_len = sizeof(client_addr);
        connfd = accept(listenfd, (struct sockaddr *) &client_addr, &addr_len);
        if (connfd < 0) {
            /* accept() is interrupted (EINTR) whenever SIGCHLD fires;
             * the original code forked with connfd == -1 in that case. */
            if (errno == EINTR)
                continue;
            perror("accept");
            continue;
        }

        child_pid = fork();
        if (child_pid < 0)      /* failed to fork a process */
        {
            fprintf(stderr, "error: failed in fork()\n");
            exit(1);
        }
        else if (child_pid == 0) /* child: serve this client, then exit */
        {
            close(listenfd);    /* child does not accept connections */
            str_echo(connfd);   /* echo until the peer closes */
            exit(0);
        }
        else                    /* parent: release its reference to connfd */
            close(connfd);
        /* close() only drops one reference to the socket; the connection
         * is destroyed once the child also closes it. */
    }
}//}}}
void str_echo(int sockfd)
//{{{
{
    /* Echo every byte received on sockfd back to the peer.  Blocks until
     * the remote side closes the connection or a hard read error occurs. */
    char data[1024];
    ssize_t count;

    for (;;) {
        count = read(sockfd, data, 1024);
        if (count > 0) {
            write(sockfd, data, count);         /* echo the chunk back */
        } else if (count == 0) {
            break;                              /* peer closed the connection */
        } else if (errno == EINTR) {
            continue;                           /* interrupted by a signal: retry */
        } else {
            perror("str_echo: read error");     /* genuine read failure */
            break;
        }
    }
}//}}}
/* SIGCHLD handler: reap every child that has already terminated. */
void sig_child(int signo)
//{{{
{
pid_t pid;
int state;
//pid = wait(&state); //would reap only a single child per signal
/* WNOHANG makes waitpid non-blocking, so one delivery of SIGCHLD can
 * reap all children that exited while signals were coalesced. */
while( (pid = waitpid(-1, &state, WNOHANG)) > 0) //reap all finished children without blocking
printf("child pid[%d] terminated.\n", pid); /* NOTE(review): printf is not async-signal-safe; consider write() -- confirm this is acceptable here */
}//}}}
| SeanXP/ARM-Tiny6410 | linux/linux-example/socket_echo_server/echo_server.c | C | mit | 6,157 |
#include <compiler.h>
#if defined(CPUCORE_IA32) && defined(SUPPORT_MEMDBG32)
#include <common/strres.h>
#include <cpucore.h>
#include <pccore.h>
#include <io/iocore.h>
#include <generic/memdbg32.h>
#define MEMDBG32_MAXMEM 16
#define MEMDBG32_DATAPERLINE 128
#define MEMDBG32_LEFTMARGIN 8
typedef struct {
UINT mode;
int width;
int height;
int bpp;
CMNPAL pal[MEMDBG32_PALS];
} MEMDBG32;
static MEMDBG32 memdbg32;
static const char _mode0[] = "Real Mode";
static const char _mode1[] = "Protected Mode";
static const char _mode2[] = "Virtual86";
static const char *modestr[3] = {_mode0, _mode1, _mode2};
static const RGB32 md32pal[MEMDBG32_PALS] = {
RGB32D(0x33, 0x33, 0x33),
RGB32D(0x00, 0x00, 0x00),
RGB32D(0xff, 0xaa, 0x00),
RGB32D(0xff, 0x00, 0x00),
RGB32D(0x11, 0x88, 0x11),
RGB32D(0x00, 0xff, 0x00),
RGB32D(0xff, 0xff, 0xff)};
void memdbg32_initialize(void) {
    /* Reset all cached state, then derive the fixed bitmap dimensions. */
    int rowWidth = MEMDBG32_BLOCKW * MEMDBG32_DATAPERLINE;
    int gridHeight = MEMDBG32_BLOCKH * 2 * MEMDBG32_MAXMEM;

    ZeroMemory(&memdbg32, sizeof(memdbg32));
    memdbg32.width = rowWidth + MEMDBG32_LEFTMARGIN;
    memdbg32.height = gridHeight + 8;   /* extra 8px row for the mode caption */
}
void memdbg32_getsize(int *width, int *height) {
if (width) {
*width = memdbg32.width;
}
if (height) {
*height = memdbg32.height;
}
}
/* Periodic hook: unconditionally request a redraw of the memory view. */
REG8 memdbg32_process(void) {
return(MEMDBG32_FLAGDRAW);
}
/*
 * Render the memory-usage map into vram.
 * Returns TRUE when the image was (re)painted, FALSE when nothing to do.
 * NOTE(review): depends on CPU_STAT_* globals from cpucore.h; behavior
 * documented here is only what is visible in this file.
 */
BOOL memdbg32_paint(CMNVRAM *vram, CMNPALCNV cnv, BOOL redraw) {
UINT mode;
UINT8 use[MEMDBG32_MAXMEM*MEMDBG32_DATAPERLINE*2 + 256];
UINT32 pd[1024];
UINT pdmax;
UINT i, j;
UINT32 pde;
UINT32 pdea;
UINT32 pte;
char str[4];
/* Determine the current CPU mode: 0=real, 1=protected, 2=virtual86. */
mode = 0;
if (CPU_STAT_PM) {
mode = 1;
}
if (CPU_STAT_VM86) {
mode = 2;
}
if (memdbg32.mode != mode) {
memdbg32.mode = mode;
redraw = TRUE;
}
/* Without paging the picture is static, so skip unless a redraw is forced. */
if ((!redraw) && (!CPU_STAT_PAGING)) {
return(FALSE);
}
if (vram == NULL) {
return(FALSE);
}
/* Rebuild the palette when the target bpp changed or on a full redraw. */
if ((memdbg32.bpp != vram->bpp) || (redraw)) {
if (cnv == NULL) {
return(FALSE);
}
(*cnv)(memdbg32.pal, md32pal, MEMDBG32_PALS, vram->bpp);
memdbg32.bpp = vram->bpp;
}
/* Clear the whole canvas to the border color. */
cmndraw_fill(vram, 0, 0, memdbg32.width, memdbg32.height,
memdbg32.pal[MEMDBG32_PALBDR]);
ZeroMemory(use, sizeof(use));
if (CPU_STAT_PAGING) {
/* Pass 1: collect unique page-directory entries (deduplicated by base
 * address), OR-ing their accessed bits together. */
pdmax = 0;
for (i=0; i<1024; i++) {
pde = cpu_memoryread_d(CPU_STAT_PDE_BASE + (i * 4));
if (pde & CPU_PDE_PRESENT) {
for (j=0; j<pdmax; j++) {
if (!((pde ^ pd[j]) & CPU_PDE_BASEADDR_MASK)) {
break;
}
}
if (j < pdmax) {
pd[j] |= pde & CPU_PDE_ACCESS;
}
else {
pd[pdmax++] = pde;
}
}
}
/* Pass 2: walk each collected page table and mark mapped physical pages
 * in 'use'; pages with the accessed bit set get the brighter color. */
for (i=0; i<pdmax; i++) {
pde = pd[i];
pdea = pde & CPU_PDE_BASEADDR_MASK;
for (j=0; j<1024; j++) {
pte = cpu_memoryread_d(pdea + (j * 4));
if ((pte & CPU_PTE_PRESENT) && (pte < 0x1000000/16*MEMDBG32_MAXMEM/128*MEMDBG32_DATAPERLINE)) {
if ((pde & CPU_PDE_ACCESS) && (pte & CPU_PTE_ACCESS)) {
use[pte >> 12] = MEMDBG32_PALPAGE1;
}
else if (!use[pte >> 12]) {
use[pte >> 12] = MEMDBG32_PALPAGE0;
}
}
}
}
}
else {
/* No paging: mark conventional memory, the high BIOS window, and any
 * extended memory (protected mode only) with static colors. */
FillMemory(use, 256, MEMDBG32_PALREAL);
FillMemory(use + (0xfa0000 >> 12), (0x60000 >> 12), MEMDBG32_PALREAL);
if ((CPU_STAT_PM) && (pccore.extmem)) {
FillMemory(use + 256, MIN(MEMDBG32_DATAPERLINE * 2 * pccore.extmem, sizeof(use) - 256), MEMDBG32_PALPM);
}
}
/* Draw one colored cell per page. */
for (i=0; i<MEMDBG32_MAXMEM*2; i++) {
for (j=0; j<MEMDBG32_DATAPERLINE; j++) {
cmndraw_fill(vram, MEMDBG32_LEFTMARGIN + j * MEMDBG32_BLOCKW, i * MEMDBG32_BLOCKH,
MEMDBG32_BLOCKW - 1, MEMDBG32_BLOCKH - 1,
memdbg32.pal[use[(i * MEMDBG32_DATAPERLINE) + j]]);
}
}
/* Left-margin labels: one hex digit per megabyte row pair. */
for (i=0; i<MEMDBG32_MAXMEM; i++) {
SPRINTF(str, "%x", i);
cmddraw_text8(vram, 0, i * MEMDBG32_BLOCKH * 2, str,
memdbg32.pal[MEMDBG32_PALTXT]);
}
/* Bottom caption: current CPU mode. */
cmddraw_text8(vram, 0, memdbg32.height - 8, modestr[mode],
memdbg32.pal[MEMDBG32_PALTXT]);
return(TRUE);
}
#endif
| AZO234/NP2kai | generic/memdbg32.c | C | mit | 3,962 |
/* crypto/cryptlib.c */
/* ====================================================================
* Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
* ECDH support in OpenSSL originally developed by
* SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project.
*/
#include "internal/cryptlib.h"
#ifndef OPENSSL_NO_DEPRECATED
static unsigned long (*id_callback) (void) = 0;
#endif
static void (*threadid_callback) (CRYPTO_THREADID *) = 0;
/*
* the memset() here and in set_pointer() seem overkill, but for the sake of
* CRYPTO_THREADID_cmp() this avoids any platform silliness that might cause
* two "equal" THREADID structs to not be memcmp()-identical.
*/
/* Store a numeric thread id; zero the whole struct first so two "equal"
 * THREADIDs stay memcmp()-identical (see comment above). */
void CRYPTO_THREADID_set_numeric(CRYPTO_THREADID *id, unsigned long val)
{
memset(id, 0, sizeof(*id));
id->val = val;
}
/* Low primes used as coefficients when hashing a pointer into 'val'. */
static const unsigned char hash_coeffs[] = { 3, 5, 7, 11, 13, 17, 19, 23 };
/* Store a pointer-valued thread id.  When the pointer fits in 'val' it is
 * embedded directly; otherwise it is hashed byte-by-byte into 'val'. */
void CRYPTO_THREADID_set_pointer(CRYPTO_THREADID *id, void *ptr)
{
unsigned char *dest = (void *)&id->val;
unsigned int accum = 0;
unsigned char dnum = sizeof(id->val);
memset(id, 0, sizeof(*id));
id->ptr = ptr;
if (sizeof(id->val) >= sizeof(id->ptr)) {
/*
 * 'ptr' can be embedded in 'val' without loss of uniqueness
 */
id->val = (unsigned long)id->ptr;
return;
}
/*
 * hash ptr ==> val. Each byte of 'val' gets the mod-256 total of a
 * linear function over the bytes in 'ptr', the co-efficients of which
 * are a sequence of low-primes (hash_coeffs is an 8-element cycle) - the
 * starting prime for the sequence varies for each byte of 'val' (unique
 * polynomials unless pointers are >64-bit). For added spice, the totals
 * accumulate rather than restarting from zero, and the index of the
 * 'val' byte is added each time (position dependence). If I was a
 * black-belt, I'd scan big-endian pointers in reverse to give low-order
 * bits more play, but this isn't crypto and I'd prefer nobody mistake it
 * as such. Plus I'm lazy.
 */
while (dnum--) {
const unsigned char *src = (void *)&id->ptr;
unsigned char snum = sizeof(id->ptr);
while (snum--)
accum += *(src++) * hash_coeffs[(snum + dnum) & 7];
accum += dnum;
*(dest++) = accum & 255;
}
}
/* Install the application's thread-id callback.  First caller wins:
 * returns 0 (and changes nothing) if a callback is already set. */
int CRYPTO_THREADID_set_callback(void (*func) (CRYPTO_THREADID *))
{
if (threadid_callback)
return 0;
threadid_callback = func;
return 1;
}
/* Return the currently installed thread-id callback (may be NULL). */
void (*CRYPTO_THREADID_get_callback(void)) (CRYPTO_THREADID *) {
return threadid_callback;
}
/* Fill 'id' with the current thread's identity: callback first, then the
 * deprecated numeric callback, then a platform fallback. */
void CRYPTO_THREADID_current(CRYPTO_THREADID *id)
{
if (threadid_callback) {
threadid_callback(id);
return;
}
#ifndef OPENSSL_NO_DEPRECATED
/* If the deprecated callback was set, fall back to that */
if (id_callback) {
CRYPTO_THREADID_set_numeric(id, id_callback());
return;
}
#endif
/* Else pick a backup */
#if defined(OPENSSL_SYS_WIN32)
CRYPTO_THREADID_set_numeric(id, (unsigned long)GetCurrentThreadId());
#else
/* For everything else, default to using the address of 'errno' */
CRYPTO_THREADID_set_pointer(id, (void *)&errno);
#endif
}
/* Compare two thread ids; relies on the structs being fully zeroed by the
 * setters so memcmp over the whole struct is meaningful. */
int CRYPTO_THREADID_cmp(const CRYPTO_THREADID *a, const CRYPTO_THREADID *b)
{
return memcmp(a, b, sizeof(*a));
}
/* Byte-wise copy of a thread id. */
void CRYPTO_THREADID_cpy(CRYPTO_THREADID *dest, const CRYPTO_THREADID *src)
{
memcpy(dest, src, sizeof(*src));
}
/* Hash of a thread id: 'val' already embeds or hashes the pointer form. */
unsigned long CRYPTO_THREADID_hash(const CRYPTO_THREADID *id)
{
return id->val;
}
#ifndef OPENSSL_NO_DEPRECATED
/* Deprecated numeric-callback API kept for backward compatibility. */
unsigned long (*CRYPTO_get_id_callback(void)) (void) {
return (id_callback);
}
void CRYPTO_set_id_callback(unsigned long (*func) (void))
{
id_callback = func;
}
/* Deprecated: return a numeric id for the current thread via the old
 * callback, or a per-platform fallback (thread id / pid / constant). */
unsigned long CRYPTO_thread_id(void)
{
unsigned long ret = 0;
if (id_callback == NULL) {
# if defined(OPENSSL_SYS_WIN32)
ret = (unsigned long)GetCurrentThreadId();
# elif defined(GETPID_IS_MEANINGLESS)
ret = 1L;
# else
ret = (unsigned long)getpid();
# endif
} else
ret = id_callback();
return (ret);
}
#endif
| vbloodv/blood | extern/openssl.orig/crypto/thr_id.c | C | mit | 9,864 |
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "server.h"
#include "rio.h"
int writeTo(int fd, char * string)
{
    /* Write the whole NUL-terminated string to fd; returns write()'s result. */
    size_t length = strlen(string);
    return write(fd, string, length);
}
/* Send a minimal HTTP error response (status line plus a short text body)
 * describing 'status'/'err_msg' to the client socket fd. */
void err_dump(int fd, int status, char * err_msg)
{
char line[LINE_SIZE];
snprintf(line, LINE_SIZE, "HTTP/1.1 %d %s\r\n\r\n", status, err_msg);
writeTo(fd, line);
snprintf(line, LINE_SIZE, "ERROR: %d\r\n", status);
writeTo(fd, line);
snprintf(line, LINE_SIZE, "ERROR MESSAGE: %s\r\n\r\n", err_msg);
writeTo(fd, line);
}
/* SIGINT handler: terminate the server process immediately. */
void sig_int(int signo)
{
exit(0);
}
void sig_child(int signo)
{
    /* Re-install the handler (SysV-style semantics) and reap every child
     * that has already exited, without blocking. */
    signal(SIGCHLD, sig_child);
    for (;;) {
        pid_t done = waitpid(-1, NULL, WNOHANG);
        if (done <= 0)
            break;
    }
}
/* Install process-wide signal dispositions before the accept loop starts. */
void initServer()
{
/* Ignore SIGPIPE so writing to a closed connection fails with EPIPE
 * instead of killing the whole server. */
signal(SIGPIPE, SIG_IGN);
signal(SIGINT, sig_int);
signal(SIGCHLD, sig_child);
}
/*
 * Create, bind and listen on a TCP socket for the given port.
 * Exits the process on any failure; returns the listening descriptor.
 */
int open_listenfd(int port)
{
    int sockfd, res;
    struct sockaddr_in addr;

    /* Create the socket. */
    sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) {
        fprintf(stderr, "socket error\n");
        exit(1);
    }
    printf("创建socket成功\n");

    /* Initialize the wildcard bind address. */
    addr.sin_family = AF_INET;
    /* BUG FIX: this used the hard-coded PORT macro and silently ignored
     * the 'port' parameter, so callers could not choose a port. */
    addr.sin_port = htons(port);
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    printf("初始化地址成功\n");

    /* Bind the address. */
    res = bind(sockfd, (const struct sockaddr *)&addr, sizeof(addr));
    if (res < 0) {
        fprintf(stderr, "bind error\n");
        exit(1);
    }
    printf("绑定地址 %s:%d 成功\n", inet_ntoa(addr.sin_addr), port);

    /* Start listening. */
    res = listen(sockfd, 50);
    if (res < 0) {
        fprintf(stderr, "listen error\n");
        exit(1);
    }
    printf("监听成功\n");
    return sockfd;
}
/*
 * Serve the file named by 'uri' (relative to DOCUMENT_ROOT) over fd.
 * Sends an HTTP error response when the file is missing, not a regular
 * file, unreadable, or cannot be mapped.
 */
void handleUri(int fd, const char * uri)
{
    char whole_uri[URI_LEN] = DOCUMENT_ROOT;
    int ffd;                 /* descriptor of the file being served */
    struct stat f_statue;
    char * buf;              /* read-only mapping of the file */

    if (uri[0] == '/') {
        uri += 1;            /* make the URI relative to the document root */
    }
    /* BUG FIX: strncat's limit is the number of characters to APPEND, not
     * the destination capacity; bound it by the remaining space. */
    strncat(whole_uri, uri, URI_LEN - strlen(whole_uri) - 1);

    if (stat(whole_uri, &f_statue) < 0) {
        err_dump(fd, 404, "Not Found");
        return;
    }
    if (! S_ISREG(f_statue.st_mode)) {
        err_dump(fd, 403, "Not Regular File");
        return;
    }
    if ((ffd = open(whole_uri, O_RDONLY)) < 0) {
        err_dump(fd, 403, "Forbidden");
        return;
    }
    buf = (char *)mmap((void *)0, f_statue.st_size, PROT_READ, MAP_PRIVATE, ffd, 0);
    /* BUG FIX: the descriptor was leaked; the mapping keeps the data alive,
     * so it can be closed immediately. */
    close(ffd);
    if (buf == MAP_FAILED) {
        err_dump(fd, 501, "Mmap Error");
        return;
    }
    writeTo(fd, "HTTP/1.1 200 OK\r\n\r\n");
    /* BUG FIX: the mapping is not NUL-terminated, so the old
     * writeTo/strlen could read past the end of the mapping.  Write
     * exactly st_size bytes instead. */
    write(fd, buf, f_statue.st_size);
    /* BUG FIX: unmap to avoid leaking one mapping per request. */
    munmap(buf, f_statue.st_size);
}
/*
 * Read one HTTP request from fd, skip its headers, and serve a GET.
 * NOTE(review): sscanf "%s" into method[10]/version[10] has no field
 * width, so an over-long token overflows these buffers -- confirm and
 * add width specifiers (e.g. "%9s").
 */
void doit(int fd)
{
char line[LINE_SIZE];
char method[10], uri[URI_LEN], version[10];
rio_t rio;
rio_init(&rio, fd);
/* Request line: "METHOD URI VERSION". */
if (rio_readline(&rio, line, LINE_SIZE) <= 0)
{
err_dump(fd, 400, "Bad Request");
return;
}
if (sscanf(line, "%s %s %s", method, uri, version) != 3) {
err_dump(fd, 400, "Bad Request");
return;
}
/* Discard headers up to the blank line terminating them. */
while(rio_readline(&rio, line, LINE_SIZE) > 0) {
if (strcmp(line, "\r\n") == 0) {
break;
}
}
/* Only GET is implemented. */
if (strcmp(method, "GET") != 0) {
err_dump(fd, 501, "No Method");
return;
}
handleUri(fd, uri);
}
/*
 * Accept loop: fork one child per connection; the parent keeps listening.
 */
int main()
{
    int fd, sockfd, pid, num;
    socklen_t client_len;
    struct sockaddr_in client_addr;
    char * client_ip;

    initServer();
    sockfd = open_listenfd(PORT);
    num = 0;                 /* running request counter (parent side) */
    /* Wait for requests. */
    while (1) {
        /* BUG FIX: accept()'s third argument is value-result and must be
         * set to the buffer size before every call; it was previously
         * never initialized, which can truncate or reject the address. */
        client_len = sizeof(client_addr);
        while ((fd = accept(sockfd, (struct sockaddr *)&client_addr, &client_len)) < 0) {
            if (errno != EINTR) {
                /* Not merely interrupted by a signal handler: fatal. */
                fprintf(stderr, "accept error\n");
                exit(1);
            }
        }
        ++num;
        client_ip = inet_ntoa(client_addr.sin_addr);
        printf("请求 %d: %s\n", num, client_ip);
        if ((pid = fork()) < 0) {
            fprintf(stderr, "fork error\n");
            exit(1);
        } else if (pid == 0) {
            /* child: serve this request and exit */
            close(sockfd);
            doit(fd);
            printf("结束 %d: %s\n", num, client_ip);
            exit(0);
        }
        close(fd);           /* parent drops its reference to the connection */
    }
    return 0;
}
| cheniison/Experiment | OS/WebServer/server.c | C | mit | 4,428 |
#include <assert.h>
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// #include <antlr3.h>
#include "toml.h"
#include "toml-parser.h"
// #include "tomlParser.h"
// #include "tomlLexer.h"
struct _TOMLStringifyData {
TOMLError *error;
int bufferSize;
int bufferIndex;
char *buffer;
int tableNameDepth;
int tableNameStackSize;
TOMLString **tableNameStack;
};
int _TOML_stringify( struct _TOMLStringifyData *self, TOMLRef src );
/* Allocate a fresh, empty/zero TOML value of the requested type.
 * Returns NULL for unknown types. */
TOMLRef TOML_alloc( TOMLType type ) {
switch ( type ) {
case TOML_TABLE:
return TOML_allocTable( NULL, NULL );
case TOML_ARRAY:
return TOML_allocArray( TOML_NOTYPE );
case TOML_STRING:
return TOML_allocString( "" );
case TOML_INT:
return TOML_allocInt( 0 );
case TOML_DOUBLE:
return TOML_allocDouble( 0 );
case TOML_BOOLEAN:
return TOML_allocBoolean( 0 );
case TOML_DATE:
return TOML_allocEpochDate( 0 );
default:
return NULL;
}
}
/* Allocate a table from a NULL-terminated varargs list of key/value
 * pairs.  Pass (NULL, NULL) for an empty table.  The table takes
 * ownership of the keys and values it is given. */
TOMLTable * TOML_allocTable( TOMLString *key, TOMLRef value, ... ) {
TOMLTable *self = malloc( sizeof(TOMLTable) );
self->type = TOML_TABLE;
self->keys = TOML_allocArray( TOML_STRING, NULL );
self->values = TOML_allocArray( TOML_NOTYPE, NULL );
if ( key != NULL ) {
TOMLArray_append( self->keys, key );
TOMLArray_append( self->values, value );
} else {
/* No first pair: the varargs list is empty by contract. */
return self;
}
va_list args;
va_start( args, value );
key = va_arg( args, TOMLString * );
while ( key != NULL ) {
value = va_arg( args, TOMLRef );
TOMLArray_append( self->keys, key );
TOMLArray_append( self->values, value );
key = va_arg( args, TOMLString * );
}
va_end( args );
return self;
}
/* Allocate an array of the given member type from a NULL-terminated
 * varargs list of members (ownership transfers to the array). */
TOMLArray * TOML_allocArray( TOMLType memberType, ... ) {
TOMLArray *self = malloc( sizeof(TOMLArray) );
self->type = TOML_ARRAY;
self->memberType = memberType;
self->size = 0;
self->members = NULL;
va_list args;
va_start( args, memberType );
TOMLRef member = va_arg( args, TOMLRef );
while ( member != NULL ) {
TOMLArray_append( self, member );
member = va_arg( args, TOMLRef );
}
va_end( args );
return self;
}
TOMLString * TOML_allocString( char *content ) {
int size = strlen( content );
TOMLString *self = malloc( sizeof(TOMLString) + size + 1 );
self->type = TOML_STRING;
self->size = size;
self->content[ self->size ] = 0;
strncpy( self->content, content, size );
return self;
}
TOMLString * TOML_allocStringN( char *content, int n ) {
TOMLString *self = malloc( sizeof(TOMLString) + n + 1 );
self->type = TOML_STRING;
self->size = n;
self->content[ n ] = 0;
strncpy( self->content, content, n );
return self;
}
/* Allocate a TOML number holding an integer value. */
TOMLNumber * TOML_allocInt( int value ) {
TOMLNumber *self = malloc( sizeof(TOMLNumber) );
self->type = TOML_INT;
// self->numberType = TOML_INT;
self->intValue = value;
return self;
}
/* Allocate a TOML number holding a double value. */
TOMLNumber * TOML_allocDouble( double value ) {
TOMLNumber *self = malloc( sizeof(TOMLNumber) );
self->type = TOML_DOUBLE;
// self->numberType = TOML_DOUBLE;
self->doubleValue = value;
return self;
}
/* Allocate a TOML boolean; any non-zero 'truth' means true. */
TOMLBoolean * TOML_allocBoolean( int truth ) {
TOMLBoolean *self = malloc( sizeof(TOMLBoolean) );
self->type = TOML_BOOLEAN;
self->isTrue = truth;
return self;
}
int _TOML_isLeapYear( int year ) {
  /* Gregorian rule: every 4th year is a leap year, except centuries,
   * except every 400th year.  Returns 1 or 0. */
  return ( year % 4 == 0 && year % 100 != 0 ) || year % 400 == 0;
}
/* Allocate a TOMLDate from broken-down UTC fields and derive its epoch
 * seconds.  NOTE(review): 'month' appears to be 0-based (it is stored
 * straight into struct tm.tm_mon) -- confirm against callers. */
TOMLDate * TOML_allocDate(
int year, int month, int day, int hour, int minute, int second
) {
TOMLDate *self = malloc( sizeof(TOMLDate) );
self->type = TOML_DATE;
self->year = year;
self->month = month;
self->day = day;
self->hour = hour;
self->minute = minute;
self->second = second;
struct tm _time = {
second,
minute,
hour,
day,
month,
year - 1900
};
// local time
time_t localEpoch = mktime( &_time );
// gm time
_time = *gmtime( &localEpoch );
time_t gmEpoch = mktime( &_time );
/* mktime interprets its input as local time; measure the local/GMT
 * offset and cancel it so sinceEpoch is a true UTC epoch. */
double diff = difftime( localEpoch, gmEpoch );
// Adjust the localEpock made by mktime to a gmt epoch.
self->sinceEpoch = localEpoch + diff;
return self;
}
/* Allocate a TOMLDate from epoch seconds, filling the broken-down
 * fields from gmtime(). */
TOMLDate * TOML_allocEpochDate( time_t stamp ) {
TOMLDate *self = malloc( sizeof(TOMLDate) );
self->type = TOML_DATE;
self->sinceEpoch = stamp;
struct tm _time = *gmtime( &stamp );
self->second = _time.tm_sec;
self->minute = _time.tm_min;
self->hour = _time.tm_hour;
self->day = _time.tm_mday;
self->month = _time.tm_mon;
self->year = _time.tm_year + 1900;
return self;
}
/* Allocate an empty error object carrying only a code; the remaining
 * fields are filled in by the parser when an error is reported. */
TOMLError * TOML_allocError( int code ) {
TOMLError *self = malloc( sizeof(TOMLError) );
self->type = TOML_ERROR;
self->code = code;
self->lineNo = 0;
self->line = NULL;
self->message = NULL;
self->fullDescription = NULL;
return self;
}
char * _TOML_cstringCopy( char *str ) {
  /* Duplicate a C string on the heap; NULL maps to NULL. */
  if ( str == NULL ) {
    return NULL;
  }
  size_t length = strlen( str );
  char *copy = malloc( length + 1 );
  memcpy( copy, str, length );
  copy[ length ] = 0;
  return copy;
}
/* Deep-copy any TOML value; returns NULL for unrecognized types.
 * Containers copy their children recursively; strings/numbers/booleans/
 * dates are copied by value; errors duplicate their owned C strings. */
TOMLRef TOML_copy( TOMLRef self ) {
TOMLBasic *basic = (TOMLBasic *) self;
if ( basic->type == TOML_TABLE ) {
TOMLTable *table = (TOMLTable *) self;
TOMLTable *newTable = malloc( sizeof(TOMLTable) );
newTable->type = TOML_TABLE;
newTable->keys = TOML_copy( table->keys );
newTable->values = TOML_copy( table->values );
return newTable;
} else if ( basic->type == TOML_ARRAY ) {
TOMLArray *array = (TOMLArray *) self;
TOMLArray *newArray = malloc( sizeof(TOMLArray) );
newArray->type = TOML_ARRAY;
newArray->memberType = array->memberType;
int i;
for ( i = 0; i < array->size; ++i ) {
TOMLArray_append(
newArray,
TOML_copy( TOMLArray_getIndex( array, i ) )
);
}
return newArray;
} else if ( basic->type == TOML_STRING ) {
TOMLString *string = (TOMLString *) self;
TOMLString *newString = malloc( sizeof(TOMLString) + string->size + 1 );
newString->type = TOML_STRING;
newString->size = string->size;
strncpy( newString->content, string->content, string->size + 1 );
return newString;
} else if ( basic->type == TOML_INT || basic->type == TOML_DOUBLE ) {
TOMLNumber *number = (TOMLNumber *) self;
TOMLNumber *newNumber = malloc( sizeof(TOMLNumber) );
newNumber->type = number->type;
// newNumber->numberType = number->numberType;
/* The union is copied wholesale as its raw 8-byte representation. */
memcpy( newNumber->bytes, number->bytes, 8 );
return newNumber;
} else if ( basic->type == TOML_BOOLEAN ) {
TOMLBoolean *boolean = (TOMLBoolean *) self;
TOMLBoolean *newBoolean = malloc( sizeof(TOMLBoolean) );
newBoolean->type = boolean->type;
newBoolean->isTrue = boolean->isTrue;
return newBoolean;
} else if ( basic->type == TOML_DATE ) {
TOMLDate *date = (TOMLDate *) self;
TOMLDate *newDate = malloc( sizeof(TOMLDate) );
*newDate = *date;
return newDate;
} else if ( basic->type == TOML_ERROR ) {
TOMLError *error = (TOMLError *) self;
TOMLError *newError = malloc( sizeof(TOMLError) );
newError->type = TOML_ERROR;
newError->code = error->code;
newError->lineNo = error->lineNo;
newError->line = _TOML_cstringCopy( error->line );
newError->message = _TOML_cstringCopy( error->message );
newError->fullDescription = _TOML_cstringCopy( error->fullDescription );
return newError;
} else {
return NULL;
}
}
/* Recursively free a TOML value and everything it owns. */
void TOML_free( TOMLRef self ) {
TOMLBasic *basic = (TOMLBasic *) self;
if ( basic->type == TOML_TABLE ) {
TOMLTable *table = (TOMLTable *) self;
TOML_free( table->keys );
TOML_free( table->values );
} else if ( basic->type == TOML_ARRAY ) {
TOMLArray *array = (TOMLArray *) self;
int i;
for ( i = 0; i < array->size; ++i ) {
TOML_free( array->members[ i ] );
}
free( array->members );
} else if ( basic->type == TOML_ERROR ) {
TOMLError *error = (TOMLError *) self;
free( error->line );
free( error->message );
free( error->fullDescription );
}
free( self );
}
/* True when the value has exactly the given type tag. */
int TOML_isType( TOMLRef self, TOMLType type ) {
TOMLBasic *basic = (TOMLBasic *) self;
return basic->type == type;
}
/* True when the value is numeric (int or double). */
int TOML_isNumber( TOMLRef self ) {
TOMLBasic *basic = (TOMLBasic *) self;
return basic->type == TOML_INT || basic->type == TOML_DOUBLE;
}
/* Walk a path of NULL-terminated varargs keys through nested tables and
 * arrays (array steps are decimal index strings).  Returns the value at
 * the end of the path, or NULL if any step is missing. */
TOMLRef TOML_find( TOMLRef self, ... ) {
TOMLBasic *basic = self;
va_list args;
va_start( args, self );
char *key;
do {
if ( basic->type == TOML_TABLE ) {
key = va_arg( args, char * );
if ( key == NULL ) {
break;
}
basic = self = TOMLTable_getKey( self, key );
} else if ( basic->type == TOML_ARRAY ) {
key = va_arg( args, char * );
if ( key == NULL ) {
break;
}
basic = self = TOMLArray_getIndex( self, atoi( key ) );
} else {
/* Scalar reached before the path was exhausted: stop here. */
break;
}
} while ( self );
va_end( args );
return self;
}
/* Look up a key in a table; returns its value or NULL when absent.
 * Comparing minSize+1 characters includes key's NUL, so a mere prefix
 * match does not count as equal. */
TOMLRef TOMLTable_getKey( TOMLTable *self, char *key ) {
int keyLength = strlen( key );
int i;
for ( i = 0; i < self->keys->size; ++i ) {
TOMLString *tableKey = TOMLArray_getIndex( self->keys, i );
int minSize = keyLength < tableKey->size ? keyLength : tableKey->size;
if ( strncmp( tableKey->content, key, minSize + 1 ) == 0 ) {
return TOMLArray_getIndex( self->values, i );
}
}
return NULL;
}
/*
 * Set key to value in the table: replaces (and frees) the existing value
 * when the key is already present, otherwise appends a new entry.  The
 * table takes ownership of 'value'.
 */
void TOMLTable_setKey( TOMLTable *self, char *key, TOMLRef value ) {
  int keyLength = strlen( key );
  int i;
  for ( i = 0; i < self->keys->size; ++i ) {
    TOMLString *tableKey = TOMLArray_getIndex( self->keys, i );
    /* BUG FIX: the old strncmp over min(len) characters treated "foo" as
     * equal to an existing "foobar" key.  Require equal lengths and equal
     * content, mirroring TOMLTable_getKey's exact-match semantics. */
    if ( tableKey->size == keyLength &&
         strncmp( tableKey->content, key, keyLength ) == 0 ) {
      TOMLArray_setIndex( self->values, i, value );
      return;
    }
  }
  TOMLArray_append( self->keys, TOML_allocString( key ) );
  TOMLArray_append( self->values, value );
}
/* Return the member at index, or NULL when the array is empty or the
 * index is out of range. */
TOMLRef TOMLArray_getIndex( TOMLArray *self, int index ) {
return self->members && self->size > index ? self->members[ index ] : NULL;
}
/* Replace (and free) the member at index; out-of-range indexes append
 * instead of failing. */
void TOMLArray_setIndex( TOMLArray *self, int index, TOMLRef value ) {
if ( index < self->size ) {
TOML_free( self->members[ index ] );
self->members[ index ] = value;
} else {
TOMLArray_append( self, value );
}
}
void TOMLArray_append( TOMLArray *self, TOMLRef value ) {
  /* Grow the member array by one slot and store value at the end.
   * The array takes ownership of 'value'. */
  TOMLRef *grown = malloc( ( self->size + 1 ) * sizeof(TOMLRef) );
  if ( self->members ) {
    memcpy( grown, self->members, self->size * sizeof(TOMLRef) );
    free( self->members );
  }
  grown[ self->size ] = value;
  self->members = grown;
  self->size++;
}
/* Return a freshly malloc'd NUL-terminated copy of the string's content;
 * caller frees. */
char * TOML_toString( TOMLString *self ) {
char *string = malloc( self->size + 1 );
TOML_copyString( self, self->size + 1, string );
return string;
}
/* Shared accessor body: read the union member matching the type tag,
 * converting to the function's return type; non-numbers yield 0. */
#define RETURN_VALUE switch ( self->type ) { \
case TOML_INT: \
return self->intValue; \
case TOML_DOUBLE: \
return self->doubleValue; \
default: \
return 0; \
}
int TOML_toInt( TOMLNumber *self ) {
RETURN_VALUE;
}
double TOML_toDouble( TOMLNumber *self ) {
RETURN_VALUE;
}
#undef RETURN_VALUE
/* Broken-down UTC view of the date's epoch seconds. */
struct tm TOML_toTm( TOMLDate *self ) {
return *gmtime( &self->sinceEpoch );
}
/* Truth value of a TOML boolean (non-zero means true). */
int TOML_toBoolean( TOMLBoolean *self ) {
return self->isTrue;
}
/* Clone a stack token onto the heap, duplicating the [start,end) slice of
 * the input as a NUL-terminated tokenStr.  Caller owns the result. */
TOMLToken * TOML_newToken( TOMLToken *token ) {
TOMLToken *heapToken = malloc( sizeof(TOMLToken) );
memcpy( heapToken, token, sizeof(TOMLToken) );
int size = token->end - token->start;
heapToken->tokenStr = malloc( size + 1 );
heapToken->tokenStr[ size ] = 0;
strncpy( heapToken->tokenStr, token->start, size );
return heapToken;
}
/*
 * Copy the TOML string's contents into 'buffer' (capacity 'size' bytes),
 * always NUL-terminating.  Non-string values yield an empty string.
 */
void TOML_strcpy( char *buffer, TOMLString *self, int size ) {
  if ( size <= 0 ) {
    return;   /* no room for even the terminator */
  }
  if ( self->type != TOML_STRING ) {
    buffer[0] = 0;
  } else {
    /* BUG FIX: the old strncpy left 'buffer' without a NUL terminator
     * whenever size <= self->size.  Copy at most size-1 bytes and
     * terminate explicitly. */
    int n = self->size < size - 1 ? self->size : size - 1;
    memcpy( buffer, self->content, n );
    buffer[ n ] = 0;
  }
}
char * _TOML_increaseBuffer( char *oldBuffer, int *size ) {
  /* Grow the scan buffer by 1KiB, preserving its contents.  One extra
   * byte past the reported size is kept NUL so TOMLScan can stop safely. */
  int grownSize = *size + 1024;
  char *grown = malloc( grownSize + 1 );
  grown[ grownSize ] = 0;
  if ( oldBuffer != NULL ) {
    strncpy( grown, oldBuffer, *size + 1 );
    free( oldBuffer );
  }
  *size = grownSize;
  return grown;
}
/*
 * Parse the TOML file at 'filename' into *dest (must be NULL on entry).
 * Returns 0 on success; on failure returns a TOML_ERROR_* code, frees the
 * partial result, and (when 'error' is non-NULL) fills it in.
 * The file is streamed through a growable buffer: whenever the scanner
 * runs off the end mid-line, the current line is shifted to the front of
 * the buffer and more data is read in behind it.
 */
int TOML_load( char *filename, TOMLTable **dest, TOMLError *error ) {
assert( *dest == NULL );
FILE *fd = fopen( filename, "r" );
if ( fd == NULL ) {
/* Could not open the file: report TOML_ERROR_FILEIO with the filename. */
if ( error ) {
error->code = TOML_ERROR_FILEIO;
error->lineNo = -1;
error->line = NULL;
int messageSize = strlen( TOMLErrorDescription[ error->code ] );
error->message =
malloc( messageSize + 1 );
strcpy( error->message, TOMLErrorDescription[ error->code ] );
error->message[ messageSize ] = 0;
int fullDescSize = messageSize + strlen( filename ) + 8;
error->fullDescription = malloc( fullDescSize + 1 );
snprintf(
error->fullDescription,
fullDescSize,
"%s File: %s",
error->message,
filename
);
}
return TOML_ERROR_FILEIO;
}
int bufferSize = 0;
char * buffer = _TOML_increaseBuffer( NULL, &bufferSize );
int copyBufferSize = 0;
/* NOTE(review): this argument appeared HTML-mangled ("©BufferSize")
 * in the inherited source; restored to &copyBufferSize. */
char * copyBuffer = _TOML_increaseBuffer( NULL, &copyBufferSize );
int read = fread( buffer, 1, bufferSize, fd );
/* A full read implies more data may remain in the file. */
int incomplete = read == bufferSize;
int hTokenId;
TOMLToken token = { 0, NULL, NULL, buffer, 0, buffer, NULL };
TOMLToken lastToken = token;
TOMLTable *topTable = *dest = TOML_allocTable( NULL, NULL );
TOMLParserState state = { topTable, topTable, 0, error, &token };
pTOMLParser parser = TOMLParserAlloc( malloc );
while (
state.errorCode == 0 && (
TOMLScan( token.end, &hTokenId, &token ) || incomplete
)
) {
/* The scanner ran past the buffered data: slide the current line to
 * the buffer's start (growing it if the line fills the buffer) and
 * refill behind it, then rescan the same token. */
while ( token.end >= buffer + bufferSize && incomplete ) {
int lineSize = buffer + bufferSize - lastToken.lineStart;
if ( lastToken.lineStart == buffer ) {
int oldBufferSize = bufferSize;
strncpy( copyBuffer, lastToken.lineStart, lineSize );
buffer = _TOML_increaseBuffer( buffer, &bufferSize );
copyBuffer = _TOML_increaseBuffer( copyBuffer, &copyBufferSize );
strncpy( buffer, copyBuffer, lineSize );
} else {
strncpy( copyBuffer, lastToken.lineStart, lineSize );
strncpy( buffer, copyBuffer, lineSize );
}
int read = fread( buffer + lineSize, 1, bufferSize - lineSize, fd );
incomplete = read == bufferSize - lineSize;
if ( !incomplete ) {
buffer[ lineSize + read ] = 0;
}
/* Re-anchor the in-flight token onto the shifted buffer. */
token = lastToken;
token.end = buffer + ( token.end - token.lineStart );
token.lineStart = buffer;
lastToken = token;
TOMLScan( token.end, &hTokenId, &token );
}
lastToken = token;
/* NOTE(review): this temporary copy is allocated and immediately
 * freed without being used -- looks like leftover debug code. */
int tmpSize = token.end - token.start;
char *tmp = malloc( tmpSize + 1 );
strncpy( tmp, token.start, tmpSize );
tmp[ tmpSize ] = 0;
free( tmp );
TOMLParser( parser, hTokenId, TOML_newToken( &token ), &state );
}
if ( state.errorCode == 0 ) {
/* Feed the final token so the parser can reduce and finish. */
TOMLParser( parser, hTokenId, TOML_newToken( &token ), &state );
}
TOMLParserFree( parser, free );
free( copyBuffer );
free( buffer );
fclose( fd );
if ( state.errorCode != 0 ) {
TOML_free( *dest );
*dest = NULL;
return state.errorCode;
}
return 0;
}
// int TOML_dump( char *filename, TOMLTable * );
/*
 * Parse an in-memory NUL-terminated TOML document into *dest (must be
 * NULL on entry).  Returns 0 on success; on failure returns the error
 * code, frees the partial result, and fills 'error' via the parser state.
 */
int TOML_parse( char *buffer, TOMLTable **dest, TOMLError *error ) {
assert( *dest == NULL );
int hTokenId;
TOMLToken token = { 0, NULL, NULL, buffer, 0, buffer, NULL };
TOMLTable *topTable = *dest = TOML_allocTable( NULL, NULL );
TOMLParserState state = { topTable, topTable, 0, error, &token };
pTOMLParser parser = TOMLParserAlloc( malloc );
/* Feed tokens to the parser until the scanner or an error stops us. */
while ( state.errorCode == 0 && TOMLScan( token.end, &hTokenId, &token ) ) {
TOMLParser( parser, hTokenId, TOML_newToken( &token ), &state );
}
if ( state.errorCode == 0 ) {
/* Feed the final token so the parser can reduce and finish. */
TOMLParser( parser, hTokenId, TOML_newToken( &token ), &state );
}
TOMLParserFree( parser, free );
if ( state.errorCode != 0 ) {
TOML_free( *dest );
*dest = NULL;
return state.errorCode;
}
return 0;
}
/**
 * Grow the table-name stack by 16 slots, preserving existing entries.
 * Passing NULL allocates a fresh stack. *nameStackSize is updated to the
 * new capacity (in elements). The old stack, if any, is freed.
 */
TOMLString ** _TOML_increaseNameStack(
  TOMLString **nameStack, int *nameStackSize
) {
  TOMLString **oldStack = nameStack;
  int oldSize = *nameStackSize;
  *nameStackSize += 16;
  nameStack = malloc( *nameStackSize * sizeof(TOMLString *) );
  if ( oldStack ) {
    // BUG FIX: copy oldSize *elements*, not oldSize *bytes*. The previous
    // code copied only `oldSize` bytes and truncated the saved pointers.
    memcpy( nameStack, oldStack, oldSize * sizeof(TOMLString *) );
    free( oldStack );
  }
  return nameStack;
}
/* Push a table name onto the breadcrumb stack used when printing
 * [table] and [[array-of-tables]] headers, growing the stack on demand. */
void _TOML_stringifyPushName(
  struct _TOMLStringifyData *self, TOMLRef src
) {
  int depth = self->tableNameDepth;
  if ( depth >= self->tableNameStackSize ) {
    self->tableNameStack = _TOML_increaseNameStack(
      self->tableNameStack, &( self->tableNameStackSize )
    );
  }
  self->tableNameStack[ depth ] = src;
  self->tableNameDepth = depth + 1;
}
/* Pop the most recently pushed table name and clear its slot. */
void _TOML_stringifyPopName(
  struct _TOMLStringifyData *self
) {
  int top = --self->tableNameDepth;
  self->tableNameStack[ top ] = NULL;
}
/**
 * Append n bytes of `text` to the output buffer, growing the buffer as
 * many times as necessary, and keep the output NUL-terminated.
 */
void _TOML_stringifyText( struct _TOMLStringifyData *self, char *text, int n ) {
  // BUG FIX: loop instead of a single `if` — one _TOML_increaseBuffer call
  // may not be enough when n exceeds the buffer's growth increment.
  while ( self->bufferIndex + n + 1 >= self->bufferSize ) {
    self->buffer = _TOML_increaseBuffer( self->buffer, &self->bufferSize );
  }
  strncpy( self->buffer + self->bufferIndex, text, n );
  self->bufferIndex += n;
  self->buffer[ self->bufferIndex ] = 0;
}
/* Emit a "[a.b.c]" header for a plain table. A table whose first value is
 * itself a table (or an array of tables) gets no header of its own — the
 * nested tables print their headers instead. An empty table also prints
 * nothing. */
void _TOML_stringifyTableHeader(
  struct _TOMLStringifyData *self, TOMLTable *table
) {
  TOMLBasic *first = TOMLArray_getIndex( table->values, 0 );
  if ( !first ) {
    return;
  }
  int firstIsTableArray = first->type == TOML_ARRAY &&
    ((TOMLArray *) first)->memberType == TOML_TABLE;
  if ( first->type == TOML_TABLE || firstIsTableArray ) {
    return;
  }
  // Blank line between sections, except at the top of the output.
  if ( self->bufferIndex != 0 ) {
    _TOML_stringifyText( self, "\n", 1 );
  }
  _TOML_stringifyText( self, "[", 1 );
  for ( int i = 0; i < self->tableNameDepth; ++i ) {
    if ( i > 0 ) {
      _TOML_stringifyText( self, ".", 1 );
    }
    TOMLString *name = self->tableNameStack[ i ];
    _TOML_stringifyText( self, name->content, name->size );
  }
  _TOML_stringifyText( self, "]\n", 2 );
}
/* Emit a "[[a.b.c]]" header for one element of an array of tables, using
 * the current breadcrumb name stack. */
void _TOML_stringifyArrayHeader( struct _TOMLStringifyData *self ) {
  // Blank line between sections, except at the top of the output.
  if ( self->bufferIndex != 0 ) {
    _TOML_stringifyText( self, "\n", 1 );
  }
  _TOML_stringifyText( self, "[[", 2 );
  for ( int i = 0; i < self->tableNameDepth; ++i ) {
    if ( i > 0 ) {
      _TOML_stringifyText( self, ".", 1 );
    }
    TOMLString *name = self->tableNameStack[ i ];
    _TOML_stringifyText( self, name->content, name->size );
  }
  _TOML_stringifyText( self, "]]\n", 3 );
}
/*
 * Append the body of a string value to the output with TOML escaping:
 * control characters, quote, slash and backslash become two-character
 * escapes, and non-ASCII UTF-8 sequences become \uXXXX.
 *
 * NOTE(review): only 2- and 3-byte UTF-8 sequences are decoded below; a
 * 4-byte lead (0xF0..) also matches the 0xe0 test and is mis-decoded, and
 * with NDEBUG the final else leaves `chsize` unset — confirm inputs are
 * limited to the Basic Multilingual Plane.
 */
void _TOML_stringifyString(
  struct _TOMLStringifyData *self, TOMLString *string
) {
  char *cursor = string->content;
  while ( cursor != NULL ) {
    // Scan for escapable character or unicode.
    char *next = cursor;
    unsigned int ch = *next;
    for ( ;
      !(
        ch == 0 ||
        ch == '\b' ||
        ch == '\t' ||
        ch == '\f' ||
        ch == '\n' ||
        ch == '\r' ||
        ch == '"' ||
        ch == '/' ||
        ch == '\\' ||
        ch > 0x7f
      );
      next++, ch = *next
    ) {}
    // A terminating NUL means the remainder of the string is plain text.
    if ( *next == 0 ) {
      next = NULL;
    }
    // Copy text up to character and then insert escaped character.
    if ( next ) {
      _TOML_stringifyText( self, cursor, next - cursor );
      #define REPLACE( match, value ) \
      if ( *next == match ) { \
        _TOML_stringifyText( self, value, 2 ); \
      }
      REPLACE( '\b', "\\b" )
      else REPLACE( '\t', "\\t" )
      else REPLACE( '\f', "\\f" )
      else REPLACE( '\n', "\\n" )
      else REPLACE( '\r', "\\r" )
      else REPLACE( '"', "\\\"" )
      else REPLACE( '/', "\\/" )
      else REPLACE( '\\', "\\\\" )
      #undef REPLACE
      else if ( ((unsigned int) *next ) > 0x7f ) {
        int num = 0;
        int chsize;
        // Decode the numeric representation of the utf8 character
        if ( ( *next & 0xe0 ) == 0xe0 ) {
          chsize = 3;
          num =
            ( ( next[0] & 0x0f ) << 12 ) |
            ( ( next[1] & 0x3f ) << 6 ) |
            ( next[2] & 0x3f );
        } else if ( ( *next & 0xc0 ) == 0xc0 ) {
          chsize = 2;
          num =
            ( ( next[0] & 0x1f ) << 6 ) |
            ( next[1] & 0x3f );
        } else {
          assert( 0 );
        }
        // Stringify \uxxxx
        char utf8Buffer[5];
        snprintf( utf8Buffer, 5, "%04x", num );
        _TOML_stringifyText( self, "\\u", 2 );
        _TOML_stringifyText( self, utf8Buffer, 4 );
        next += chsize - 1;
      }
      next++;
      // Copy everything up to the end.
    } else {
      _TOML_stringifyText( self, cursor, strlen( cursor ) );
    }
    cursor = next;
  }
}
/* Print one "key = value" line. String values are wrapped in quotes and
 * escaped; every other value type is delegated to _TOML_stringify. */
void _TOML_stringifyEntry(
  struct _TOMLStringifyData *self, TOMLString *key, TOMLBasic *value
) {
  int isString = value->type == TOML_STRING;
  _TOML_stringifyText( self, key->content, key->size );
  _TOML_stringifyText( self, " = ", 3 );
  if ( isString ) {
    _TOML_stringifyText( self, "\"", 1 );
    _TOML_stringifyString( self, (TOMLString *) value );
    _TOML_stringifyText( self, "\"", 1 );
  } else {
    _TOML_stringify( self, value );
  }
  _TOML_stringifyText( self, "\n", 1 );
}
/*
 * Recursively serialize a TOML value into self->buffer, dispatching on the
 * value's runtime type tag. Tables print their scalar entries first and
 * recurse into sub-tables / arrays-of-tables with bracketed headers; plain
 * arrays are printed densely on one line. Always returns 0 — errors are
 * not currently reported (see trailing comment).
 */
int _TOML_stringify(
  struct _TOMLStringifyData *self, TOMLRef src
) {
  // Cast to TOMLBasic to discover type.
  TOMLBasic *basic = src;
  // if null
  if ( src == NULL ) {
    _TOML_stringifyText( self, "(null)", 6 );
  // if table
  } else if ( basic->type == TOML_TABLE ) {
    TOMLTable *table = src;
    // loop keys
    for ( int i = 0; i < table->keys->size; ++i ) {
      TOMLRef key = TOMLArray_getIndex( table->keys, i );
      TOMLRef value = TOMLArray_getIndex( table->values, i );
      TOMLBasic *basicValue = value;
      // if value is table, print header, recurse
      if ( basicValue->type == TOML_TABLE ) {
        TOMLTable *tableValue = value;
        _TOML_stringifyPushName( self, key );
        _TOML_stringifyTableHeader( self, value );
        _TOML_stringify( self, value );
        _TOML_stringifyPopName( self );
      // if value is array
      } else if ( basicValue->type == TOML_ARRAY ) {
        TOMLArray *array = value;
        // if value is object array
        if ( array->memberType == TOML_TABLE ) {
          // loop indices, print headers, recurse
          for ( int j = 0; j < array->size; ++j ) {
            _TOML_stringifyPushName( self, key );
            _TOML_stringifyArrayHeader( self );
            _TOML_stringify( self, TOMLArray_getIndex( array, j ) );
            _TOML_stringifyPopName( self );
          }
        } else {
          // print entry line with dense (no newlines) array
          _TOML_stringifyEntry( self, key, value );
        }
      } else {
        // if value is string or number, print entry
        _TOML_stringifyEntry( self, key, value );
      }
    }
  // if array
  } else if ( basic->type == TOML_ARRAY ) {
    TOMLArray *array = src;
    // print array densely
    _TOML_stringifyText( self, "[", 1 );
    for ( int i = 0; i < array->size; ++i ) {
      _TOML_stringifyText( self, " ", 1 );
      TOMLBasic *arrayValue = TOMLArray_getIndex( array, i );
      if ( arrayValue->type == TOML_STRING ) {
        _TOML_stringifyText( self, "\"", 1 );
        _TOML_stringifyString( self, (TOMLString *) arrayValue );
        _TOML_stringifyText( self, "\"", 1 );
      } else {
        _TOML_stringify( self, arrayValue );
      }
      if ( i != array->size - 1 ) {
        _TOML_stringifyText( self, ",", 1 );
      } else {
        _TOML_stringifyText( self, " ", 1 );
      }
    }
    _TOML_stringifyText( self, "]", 1 );
  // if string
  } else if ( basic->type == TOML_STRING ) {
    TOMLString *string = src;
    // print string (note: raw, unquoted — quoting is done by the callers)
    _TOML_stringifyText( self, string->content, string->size );
  // if number
  } else if ( TOML_isNumber( basic ) ) {
    TOMLNumber *number = src;
    char numberBuffer[ 16 ];
    memset( numberBuffer, 0, 16 );
    int size;
    if ( number->type == TOML_INT ) {
      size = snprintf( numberBuffer, 15, "%d", number->intValue );
    } else if ( fmod( number->doubleValue, 1 ) == 0 ) {
      // Whole-valued doubles keep a trailing ".0" so they re-parse as floats.
      size = snprintf( numberBuffer, 15, "%.1f", number->doubleValue );
    } else {
      size = snprintf( numberBuffer, 15, "%g", number->doubleValue );
    }
    // print number
    _TOML_stringifyText( self, numberBuffer, size );
  } else if ( basic->type == TOML_BOOLEAN ) {
    TOMLBoolean *boolean = (TOMLBoolean *) basic;
    if ( boolean->isTrue ) {
      _TOML_stringifyText( self, "true", 4 );
    } else {
      _TOML_stringifyText( self, "false", 5 );
    }
  } else if ( basic->type == TOML_DATE ) {
    TOMLDate *date = (TOMLDate *) basic;
    char numberBuffer[ 16 ];
    int size;
    // Emit an RFC 3339 style UTC timestamp: YYYY-MM-DDTHH:MM:SSZ.
    #define STRINGIFY_DATE_SECTION( format, part, spacer ) \
    size = snprintf( numberBuffer, 15, format, date->part ); \
    _TOML_stringifyText( self, numberBuffer, size ); \
    _TOML_stringifyText( self, spacer, 1 )
    STRINGIFY_DATE_SECTION( "%d", year, "-" );
    STRINGIFY_DATE_SECTION( "%0.2d", month, "-" );
    STRINGIFY_DATE_SECTION( "%0.2d", day, "T" );
    STRINGIFY_DATE_SECTION( "%0.2d", hour, ":" );
    STRINGIFY_DATE_SECTION( "%0.2d", minute, ":" );
    STRINGIFY_DATE_SECTION( "%0.2d", second, "Z" );
    #undef STRINGIFY_DATE_SECTION
  } else {
    assert( 0 );
  }
  // if error
  // print error
  return 0;
}
/* Serialize any TOML value into a newly allocated string. The caller owns
 * the returned *buffer and must free() it. Returns the stringifier's error
 * code (currently always 0). */
int TOML_stringify( char **buffer, TOMLRef src, TOMLError *error ) {
  int outCapacity = 0;
  char *out = _TOML_increaseBuffer( NULL, &outCapacity );
  int nameCapacity = 0;
  TOMLString **nameStack = _TOML_increaseNameStack( NULL, &nameCapacity );
  // Positional initializer: field order follows struct _TOMLStringifyData.
  struct _TOMLStringifyData stringifyData = {
    error,
    outCapacity,
    0,
    out,
    0,
    nameCapacity,
    nameStack
  };
  int errorCode = _TOML_stringify( &stringifyData, src );
  free( nameStack );
  *buffer = stringifyData.buffer;
  return errorCode;
}
| mzgoddard/tomlc | toml.c | C | mit | 25,919 |
/* Error handling */
#include "Python.h"
/* Replace the calling thread's current exception state with (type, value,
   traceback). Steals a reference to each argument and releases the
   references previously held by the thread state. Any argument may be
   NULL; restoring all-NULL clears the exception. */
void
PyErr_Restore(PyObject *type, PyObject *value, PyObject *traceback)
{
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *oldtype, *oldvalue, *oldtraceback;

    /* Save these in locals to safeguard against recursive
       invocation through Py_XDECREF */
    oldtype = tstate->curexc_type;
    oldvalue = tstate->curexc_value;
    oldtraceback = tstate->curexc_traceback;

    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = traceback;

    Py_XDECREF(oldtype);
    Py_XDECREF(oldvalue);
    Py_XDECREF(oldtraceback);
}
/* Raise `exception` with the given value. Unlike PyErr_Restore this does
   not steal references: both arguments are INCREF'd here and the new
   references are handed to PyErr_Restore. No traceback is set. */
void
PyErr_SetObject(PyObject *exception, PyObject *value)
{
    Py_XINCREF(exception);
    Py_XINCREF(value);
    PyErr_Restore(exception, value, (PyObject *)NULL);
}
/* Raise `exception` with no associated value. */
void
PyErr_SetNone(PyObject *exception)
{
    PyErr_SetObject(exception, (PyObject *)NULL);
}
/* Raise `exception` with a C string as its value. The temporary string
   object is released after PyErr_SetObject takes its own reference.
   NOTE(review): if PyString_FromString itself fails, value is NULL and the
   exception is raised with no value — same as upstream CPython. */
void
PyErr_SetString(PyObject *exception, const char *string)
{
    PyObject *value = PyString_FromString(string);
    PyErr_SetObject(exception, value);
    Py_XDECREF(value);
}
/* Return a borrowed reference to the currently raised exception type, or
   NULL if no exception is set. */
PyObject *
PyErr_Occurred(void)
{
    PyThreadState *tstate = PyThreadState_GET();

    return tstate->curexc_type;
}
/* Return 1 if exception `err` matches `exc`. `exc` may be a (possibly
   nested) tuple of exceptions, which is searched recursively. With the
   class-based checks commented out below, matching degenerates to pointer
   identity. */
int
PyErr_GivenExceptionMatches(PyObject *err, PyObject *exc)
{
    if (err == NULL || exc == NULL) {
        /* maybe caused by "import exceptions" that failed early on */
        return 0;
    }
    if (PyTuple_Check(exc)) {
        int i, n;

        n = PyTuple_Size(exc);
        for (i = 0; i < n; i++) {
            /* Test recursively */
            if (PyErr_GivenExceptionMatches(
                err, PyTuple_GET_ITEM(exc, i)))
            {
                return 1;
            }
        }
        return 0;
    }
    /* err might be an instance, so check its class. */
// 	if (PyInstance_Check(err))
// 		err = (PyObject*)((PyInstanceObject*)err)->in_class;

	//if (PyClass_Check(err) && PyClass_Check(exc))
	//	return PyClass_IsSubclass(err, exc);

    return err == exc;
}
/* Return 1 if the currently raised exception matches `exc` (identity or
   tuple membership; see PyErr_GivenExceptionMatches). */
int
PyErr_ExceptionMatches(PyObject *exc)
{
    return PyErr_GivenExceptionMatches(PyErr_Occurred(), exc);
}
/* Used in many places to normalize a raised exception, including in
   eval_code2(), do_raise(), and PyErr_Print().

   Ensures that when *exc is a class, *val is an instance of that class,
   instantiating the class with *val as arguments where necessary. On an
   instantiation failure the new error replaces the old one (keeping the
   old traceback if the new error has none) and normalization recurses. */
void
PyErr_NormalizeException(PyObject **exc, PyObject **val, PyObject **tb)
{
    PyObject *type = *exc;
    PyObject *value = *val;
    PyObject *inclass = NULL;
    PyObject *initial_tb = NULL;

    if (type == NULL) {
        /* There was no exception, so nothing to do. */
        return;
    }

    /* If PyErr_SetNone() was used, the value will have been actually
       set to NULL.
    */
    if (!value) {
        value = Py_None;
        Py_INCREF(value);
    }

    if (PyInstance_Check(value))
        inclass = (PyObject*)((PyInstanceObject*)value)->in_class;

    /* Normalize the exception so that if the type is a class, the
       value will be an instance.
    */
    if (PyClass_Check(type)) {
        /* if the value was not an instance, or is not an instance
           whose class is (or is derived from) type, then use the
           value as an argument to instantiation of the type
           class.
        */
        if (!inclass || !PyClass_IsSubclass(inclass, type)) {
            PyObject *args, *res;

            if (value == Py_None)
                args = Py_BuildValue("()");
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            }
            else
                args = Py_BuildValue("(O)", value);

            if (args == NULL)
                goto finally;
            res = PyEval_CallObject(type, args);
            Py_DECREF(args);
            if (res == NULL)
                goto finally;
            Py_DECREF(value);
            value = res;
        }
        /* if the class of the instance doesn't exactly match the
           class of the type, believe the instance
        */
        else if (inclass != type) {
            Py_DECREF(type);
            type = inclass;
            Py_INCREF(type);
        }
    }
    *exc = type;
    *val = value;
    return;
finally:
    /* Instantiation failed: drop the old exception, fetch the new one,
       and keep the old traceback when the new exception has none. */
    Py_DECREF(type);
    Py_DECREF(value);
    /* If the new exception doesn't set a traceback and the old
       exception had a traceback, use the old traceback for the
       new exception.  It's better than nothing.
    */
    initial_tb = *tb;
    PyErr_Fetch(exc, val, tb);
    if (initial_tb != NULL) {
        if (*tb == NULL)
            *tb = initial_tb;
        else
            Py_DECREF(initial_tb);
    }
    /* normalize recursively */
    PyErr_NormalizeException(exc, val, tb);
}
/* Move the current exception state into the output parameters (ownership
   transfers to the caller) and clear it from the thread state. Each output
   may receive NULL. */
void
PyErr_Fetch(PyObject **p_type, PyObject **p_value, PyObject **p_traceback)
{
    PyThreadState *tstate = PyThreadState_Get();

    *p_type = tstate->curexc_type;
    *p_value = tstate->curexc_value;
    *p_traceback = tstate->curexc_traceback;

    tstate->curexc_type = NULL;
    tstate->curexc_value = NULL;
    tstate->curexc_traceback = NULL;
}
/* Clear the current exception state, releasing its references. */
void
PyErr_Clear(void)
{
    PyErr_Restore(NULL, NULL, NULL);
}
/* Convenience functions to set a type error exception and return 0 */

int
PyErr_BadArgument(void)
{
    PyErr_SetString(PyExc_TypeError,
                    "bad argument type for built-in operation");
    return 0;
}
/* Raise MemoryError and return NULL (so callers can `return PyErr_NoMemory();`).
   Prefers the pre-allocated instance, since allocating a new MemoryError
   while out of memory is likely to fail. */
PyObject *
PyErr_NoMemory(void)
{
    if (PyErr_ExceptionMatches(PyExc_MemoryError))
        /* already current */
        return NULL;

    /* raise the pre-allocated instance if it still exists */
    if (PyExc_MemoryErrorInst)
        PyErr_SetObject(PyExc_MemoryError, PyExc_MemoryErrorInst);
    else
        /* this will probably fail since there's no memory and hee,
           hee, we have to instantiate this class
        */
        PyErr_SetNone(PyExc_MemoryError);

    return NULL;
}
/* ... */

/* Raise SystemError identifying the file/line of a bad internal call;
   invoked through the PyErr_BadInternalCall() macro defined below. */
void
_PyErr_BadInternalCall(char *filename, int lineno)
{
    PyErr_Format(PyExc_SystemError,
                 "%s:%d: bad argument to internal function",
                 filename, lineno);
}
/* Remove the preprocessor macro for PyErr_BadInternalCall() so that we can
   export the entry point for existing object code: */
#undef PyErr_BadInternalCall
/* Plain-function variant kept for ABI compatibility; new code goes through
   the macro re-defined just below, which records file and line. */
void
PyErr_BadInternalCall(void)
{
    PyErr_Format(PyExc_SystemError,
                 "bad argument to internal function");
}
#define PyErr_BadInternalCall() _PyErr_BadInternalCall(__FILE__, __LINE__)
/* Raise `exception` with a printf-style formatted message and return NULL
   (so callers can `return PyErr_Format(...);`). */
PyObject *
PyErr_Format(PyObject *exception, const char *format, ...)
{
    va_list vargs;
    PyObject* string;

    va_start(vargs, format);
    string = PyString_FromFormatV(format, vargs);
    PyErr_SetObject(exception, string);
    Py_XDECREF(string);
    va_end(vargs);
    return NULL;
}
/* Create a new exception class. `name` must be of the form "module.class";
   the part before the last dot becomes the class's __module__. `base`
   defaults to Exception; `dict` may be NULL. Returns a new reference, or
   NULL with an exception set on failure. */
PyObject *
PyErr_NewException(char *name, PyObject *base, PyObject *dict)
{
    char *dot;
    PyObject *modulename = NULL;
    PyObject *classname = NULL;
    PyObject *mydict = NULL;
    PyObject *bases = NULL;
    PyObject *result = NULL;

    dot = strrchr(name, '.');
    if (dot == NULL) {
        PyErr_SetString(PyExc_SystemError,
            "PyErr_NewException: name must be module.class");
        return NULL;
    }
    if (base == NULL)
        base = PyExc_Exception;
    if (!PyClass_Check(base)) {
        /* Must be using string-based standard exceptions (-X) */
        return PyString_FromString(name);
    }
    if (dict == NULL) {
        dict = mydict = PyDict_New();
        if (dict == NULL)
            goto failure;
    }
    if (PyDict_GetItemString(dict, "__module__") == NULL) {
        modulename = PyString_FromStringAndSize(name, (int)(dot-name));
        if (modulename == NULL)
            goto failure;
        if (PyDict_SetItemString(dict, "__module__", modulename) != 0)
            goto failure;
    }
    classname = PyString_FromString(dot+1);
    if (classname == NULL)
        goto failure;
    bases = Py_BuildValue("(O)", base);
    if (bases == NULL)
        goto failure;
    result = PyClass_New(bases, dict, classname);
  failure:
    /* goto-style cleanup: all temporaries released on both paths. */
    Py_XDECREF(bases);
    Py_XDECREF(mydict);
    Py_XDECREF(classname);
    Py_XDECREF(modulename);
    return result;
}
/* Call when an exception has occurred but there is no way for Python
   to handle it.  Examples: exception in __del__ or during GC.
   NOTE(review): the full sys.stderr reporting path is disabled below; this
   build only prints a fixed notice and does NOT clear the pending error —
   confirm callers tolerate the exception remaining set. */
void
PyErr_WriteUnraisable(PyObject *obj)
{
    printf("Unraisable Exception\n");
// 	PyObject *f, *t, *v, *tb;
// 	PyErr_Fetch(&t, &v, &tb);
// 	f = PySys_GetObject("stderr");
// 	if (f != NULL) {
// 		PyFile_WriteString("Exception ", f);
// 		if (t) {
// 			PyFile_WriteObject(t, f, Py_PRINT_RAW);
// 			if (v && v != Py_None) {
// 				PyFile_WriteString(": ", f);
// 				PyFile_WriteObject(v, f, 0);
// 			}
// 		}
// 		PyFile_WriteString(" in ", f);
// 		PyFile_WriteObject(obj, f, 0);
// 		PyFile_WriteString(" ignored\n", f);
// 		PyErr_Clear(); /* Just in case */
// 	}
// 	Py_XDECREF(t);
// 	Py_XDECREF(v);
// 	Py_XDECREF(tb);
}
extern PyObject *PyModule_GetWarningsModule();

/* Function to issue a warning message; may raise an exception.
   Delegates to warnings.warn() when the warnings module is available,
   otherwise just prints the message. Returns 0 on success, -1 if the
   warning machinery raised. */
int
PyErr_Warn(PyObject *category, char *message)
{
    PyObject *dict, *func = NULL;
    PyObject *warnings_module = PyModule_GetWarningsModule();

    if (warnings_module != NULL) {
        dict = PyModule_GetDict(warnings_module);
        func = PyDict_GetItemString(dict, "warn");
    }
    if (func == NULL) {
        /* Fallback path: no warnings module, emit to stdout. */
        printf("warning: %s\n", message);
        return 0;
    }
    else {
        PyObject *args, *res;

        if (category == NULL)
            category = PyExc_RuntimeWarning;
        args = Py_BuildValue("(sO)", message, category);
        if (args == NULL)
            return -1;
        res = PyEval_CallObject(func, args);
        Py_DECREF(args);
        if (res == NULL)
            return -1;
        Py_DECREF(res);
        return 0;
    }
}
| jtauber/cleese | necco/python/Python/errors.c | C | mit | 8,613 |
/* This is a managed file. Do not delete this comment. */
#include <include/lifecycle.h>
/* Log the named lifecycle hook together with the object's current state. */
static void echo(lifecycle_Foo this, char* hook) {
    corto_state current = corto_stateof(this);
    char *label = corto_ptr_str(&current, corto_state_o, 0);
    corto_info("callback: %s [%s]", hook, label);
    free(label);
}
/* Lifecycle hook: object construction. Returns 0 to accept the object. */
int16_t lifecycle_Foo_construct(
    lifecycle_Foo this)
{
    echo(this, "construct");
    return 0;
}
/* Lifecycle hook: object defined. */
void lifecycle_Foo_define(
    lifecycle_Foo this)
{
    echo(this, "define");
}
/* Lifecycle hook: object deinitialization. */
void lifecycle_Foo_deinit(
    lifecycle_Foo this)
{
    echo(this, "deinit");
}
/* Lifecycle hook: object deleted. */
void lifecycle_Foo_delete(
    lifecycle_Foo this)
{
    echo(this, "delete");
}
/* Lifecycle hook: object destruction. */
void lifecycle_Foo_destruct(
    lifecycle_Foo this)
{
    echo(this, "destruct");
}
/* Lifecycle hook: object initialization. Returns 0 to accept the object. */
int16_t lifecycle_Foo_init(
    lifecycle_Foo this)
{
    echo(this, "init");
    return 0;
}
/* Lifecycle hook: object updated. */
void lifecycle_Foo_update(
    lifecycle_Foo this)
{
    echo(this, "update");
}
/* Lifecycle hook: object validation. Returns 0 to accept the object. */
int16_t lifecycle_Foo_validate(
    lifecycle_Foo this)
{
    echo(this, "validate");
    return 0;
}
| cortoproject/examples | c/modeling/lifecycle/src/Foo.c | C | mit | 1,051 |
//#include <stdio.h>
//#include <stdlib.h>
//#include <stdint.h>
//#include <stdbool.h>
//#include <string.h>
//#include <stddef.h>
#include "esp_common.h"
#include "coap.h"
#include "shell.h"
//#include <rtthread.h>
//#define shell_printf rt_kshell_printf
extern void endpoint_setup(void);
extern const coap_endpoint_t endpoints[];
#ifdef MICROCOAP_DEBUG
/* Debug helper: print each field of a parsed CoAP header. */
void ICACHE_FLASH_ATTR coap_dumpHeader(coap_header_t *hdr)
{
    shell_printf("Header:\n");
    shell_printf("  ver  0x%02X\n", hdr->ver);
    shell_printf("  t    0x%02X\n", hdr->t);
    shell_printf("  tkl  0x%02X\n", hdr->tkl);
    shell_printf("  code 0x%02X\n", hdr->code);
    shell_printf("  id   0x%02X%02X\n", hdr->id[0], hdr->id[1]);
}
#endif
#ifdef MICROCOAP_DEBUG
/* Debug helper: hex-dump buflen bytes. With bare=true only the bytes are
   printed; otherwise a "Dump:" prefix and trailing newline are added. */
void ICACHE_FLASH_ATTR coap_dump(const uint8_t *buf, size_t buflen, bool bare)
{
    if (bare)
    {
        while(buflen--)
            shell_printf("%02X%s", *buf++, (buflen > 0) ? " " : "");
    }
    else
    {
        shell_printf("Dump: ");
        while(buflen--)
            shell_printf("%02X%s", *buf++, (buflen > 0) ? " " : "");
        shell_printf("\n");
    }
}
#endif
/* Decode the fixed 4-byte CoAP header (RFC 7252 section 3) into hdr.
 * Returns 0, or a COAP_ERR_* code when the packet is too short or the
 * protocol version is not 1. */
int ICACHE_FLASH_ATTR coap_parseHeader(coap_header_t *hdr, const uint8_t *buf, size_t buflen)
{
    uint8_t lead;

    if (buflen < 4)
        return COAP_ERR_HEADER_TOO_SHORT;

    lead = buf[0];
    hdr->ver = (lead >> 6) & 0x03;   /* version: top two bits */
    if (hdr->ver != 1)
        return COAP_ERR_VERSION_NOT_1;
    hdr->t = (lead >> 4) & 0x03;     /* message type */
    hdr->tkl = lead & 0x0F;          /* token length */
    hdr->code = buf[1];
    hdr->id[0] = buf[2];
    hdr->id[1] = buf[3];
    return 0;
}
/* Locate the token (0-8 bytes immediately after the header) inside buf and
 * record it in tokbuf. Returns 0, or COAP_ERR_TOKEN_TOO_SHORT when the
 * declared token length is invalid or runs past the packet. */
int ICACHE_FLASH_ATTR coap_parseToken(coap_buffer_t *tokbuf, const coap_header_t *hdr, const uint8_t *buf, size_t buflen)
{
    uint8_t tkl = hdr->tkl;

    if (tkl == 0)
    {
        tokbuf->p = NULL;
        tokbuf->len = 0;
        return 0;
    }
    if (tkl > 8)
        return COAP_ERR_TOKEN_TOO_SHORT;   /* RFC 7252 allows at most 8 */
    if (4U + tkl > buflen)
        return COAP_ERR_TOKEN_TOO_SHORT;   /* token bigger than packet */
    tokbuf->p = buf + 4;                   /* just past the header */
    tokbuf->len = tkl;
    return 0;
}
// advances p
/* Decode one option at *buf (RFC 7252 section 3.1): a delta/length nibble
   pair, optional extended delta/length bytes (13 -> one extra byte,
   14 -> two extra bytes + 269), then the option value. On success fills
   `option`, adds the delta to *running_delta, and advances *buf past the
   option. Returns 0 or a COAP_ERR_* code. */
// advances p
int ICACHE_FLASH_ATTR coap_parseOption(coap_option_t *option, uint16_t *running_delta, const uint8_t **buf, size_t buflen)
{
    const uint8_t *p = *buf;
    uint8_t headlen = 1;
    uint16_t len, delta;

    if (buflen < headlen) // too small
        return COAP_ERR_OPTION_TOO_SHORT_FOR_HEADER;

    delta = (p[0] & 0xF0) >> 4;
    len = p[0] & 0x0F;

    // These are untested and may be buggy
    if (delta == 13)
    {
        headlen++;
        if (buflen < headlen)
            return COAP_ERR_OPTION_TOO_SHORT_FOR_HEADER;
        delta = p[1] + 13;
        p++;
    }
    else
    if (delta == 14)
    {
        headlen += 2;
        if (buflen < headlen)
            return COAP_ERR_OPTION_TOO_SHORT_FOR_HEADER;
        delta = ((p[1] << 8) | p[2]) + 269;
        p+=2;
    }
    else
    if (delta == 15)
        return COAP_ERR_OPTION_DELTA_INVALID;   // 15 is the payload marker nibble

    if (len == 13)
    {
        headlen++;
        if (buflen < headlen)
            return COAP_ERR_OPTION_TOO_SHORT_FOR_HEADER;
        len = p[1] + 13;
        p++;
    }
    else
    if (len == 14)
    {
        headlen += 2;
        if (buflen < headlen)
            return COAP_ERR_OPTION_TOO_SHORT_FOR_HEADER;
        len = ((p[1] << 8) | p[2]) + 269;
        p+=2;
    }
    else
    if (len == 15)
        return COAP_ERR_OPTION_LEN_INVALID;

    if ((p + 1 + len) > (*buf + buflen))
        return COAP_ERR_OPTION_TOO_BIG;

    //shell_printf("option num=%d\n", delta + *running_delta);
    option->num = delta + *running_delta;
    option->buf.p = p+1;
    option->buf.len = len;
    //coap_dump(p+1, len, false);

    // advance buf
    *buf = p + 1 + len;
    *running_delta += delta;

    return 0;
}
// http://tools.ietf.org/html/rfc7252#section-3.1
// http://tools.ietf.org/html/rfc7252#section-3.1
/* Parse all options following the token, then the optional payload
   (introduced by a 0xFF marker). On entry *numOptions is the capacity of
   `options`; on return it holds the number actually parsed. A missing
   payload yields payload->p == NULL, len == 0. */
int ICACHE_FLASH_ATTR coap_parseOptionsAndPayload(coap_option_t *options, uint8_t *numOptions, coap_buffer_t *payload, const coap_header_t *hdr, const uint8_t *buf, size_t buflen)
{
    size_t optionIndex = 0;
    uint16_t delta = 0;
    const uint8_t *p = buf + 4 + hdr->tkl;
    const uint8_t *end = buf + buflen;
    int rc;

    if (p > end)
        return COAP_ERR_OPTION_OVERRUNS_PACKET; // out of bounds

    //coap_dump(p, end - p);

    // 0xFF is payload marker
    while((optionIndex < *numOptions) && (p < end) && (*p != 0xFF))
    {
        if (0 != (rc = coap_parseOption(&options[optionIndex], &delta, &p, end-p)))
            return rc;
        optionIndex++;
    }
    *numOptions = optionIndex;

    if (p+1 < end && *p == 0xFF)  // payload marker
    {
        payload->p = p+1;
        payload->len = end-(p+1);
    }
    else
    {
        payload->p = NULL;
        payload->len = 0;
    }

    return 0;
}
#ifdef MICROCOAP_DEBUG
void ICACHE_FLASH_ATTR coap_dumpOptions(coap_option_t *opts, size_t numopt)
{
size_t i;
shell_printf("Options:\n");
for (i=0;i<numopt;i++)
{
shell_printf(" 0x%02X [ ", opts[i].num);
coap_dump(opts[i].buf.p, opts[i].buf.len, true);
shell_printf(" ]\n");
}
}
#endif
#ifdef MICROCOAP_DEBUG
/* Debug helper: print a whole parsed packet — header, options, payload. */
void ICACHE_FLASH_ATTR coap_dumpPacket(coap_packet_t *pkt)
{
    coap_dumpHeader(&pkt->hdr);
    coap_dumpOptions(pkt->opts, pkt->numopts);
    shell_printf("Payload: \n");
    coap_dump(pkt->payload.p, pkt->payload.len, true);
    shell_printf("\n");
}
#endif
/* Parse a raw datagram into pkt: header, then token, then options and
 * payload. Returns 0 on success or the first COAP_ERR_* encountered. */
int ICACHE_FLASH_ATTR coap_parse(coap_packet_t *pkt, const uint8_t *buf, size_t buflen)
{
    int rc;

    rc = coap_parseHeader(&pkt->hdr, buf, buflen);
    if (rc != 0)
        return rc;
    rc = coap_parseToken(&pkt->tok, &pkt->hdr, buf, buflen);
    if (rc != 0)
        return rc;
    pkt->numopts = MAXOPT;   /* in: capacity, out: count parsed */
    return coap_parseOptionsAndPayload(pkt->opts, &(pkt->numopts), &(pkt->payload), &pkt->hdr, buf, buflen);
}
// options are always stored consecutively, so can return a block with same option num
// options are always stored consecutively, so can return a block with same option num
/* Return a pointer to the first option numbered `num` and set *count to
 * the length of the consecutive run, or NULL (count 0) when absent. */
const coap_option_t * ICACHE_FLASH_ATTR coap_findOptions(const coap_packet_t *pkt, uint8_t num, uint8_t *count)
{
    // FIXME, options is always sorted, can find faster than this
    size_t i;
    const coap_option_t *run = NULL;
    *count = 0;
    for (i=0;i<pkt->numopts;i++)
    {
        if (pkt->opts[i].num != num)
        {
            if (run != NULL)
                break;          /* the consecutive run has ended */
            continue;
        }
        if (run == NULL)
            run = &pkt->opts[i];
        (*count)++;
    }
    return run;
}
/* Copy a (non NUL-terminated) coap_buffer_t into strbuf and terminate it.
 * Fails with COAP_ERR_BUFFER_TOO_SMALL when strbuf cannot hold the bytes
 * plus the terminator. */
int ICACHE_FLASH_ATTR coap_buffer_to_string(char *strbuf, size_t strbuflen, const coap_buffer_t *buf)
{
    size_t n = buf->len;

    if (n + 1 > strbuflen)
        return COAP_ERR_BUFFER_TOO_SMALL;
    memcpy(strbuf, buf->p, n);
    strbuf[n] = 0;
    return 0;
}
/* Serialize pkt into buf (RFC 7252 section 3). On entry *buflen holds the
 * capacity of buf; on success it is updated to the number of bytes
 * written. Returns 0 or a COAP_ERR_* code. Options must already be sorted
 * by option number, as the wire format encodes deltas. */
int ICACHE_FLASH_ATTR coap_build(uint8_t *buf, size_t *buflen, const coap_packet_t *pkt)
{
    size_t opts_len = 0;
    size_t i;
    uint8_t *p;
    uint16_t running_delta = 0;

    // build the fixed 4-byte header
    if (*buflen < (4U + pkt->hdr.tkl))
        return COAP_ERR_BUFFER_TOO_SMALL;
    buf[0] = (pkt->hdr.ver & 0x03) << 6;
    buf[0] |= (pkt->hdr.t & 0x03) << 4;
    buf[0] |= (pkt->hdr.tkl & 0x0F);
    buf[1] = pkt->hdr.code;
    buf[2] = pkt->hdr.id[0];
    buf[3] = pkt->hdr.id[1];

    // inject token
    p = buf + 4;
    if ((pkt->hdr.tkl > 0) && (pkt->hdr.tkl != pkt->tok.len))
        return COAP_ERR_UNSUPPORTED;
    if (pkt->hdr.tkl > 0)
        memcpy(p, pkt->tok.p, pkt->hdr.tkl);

    // inject options — http://tools.ietf.org/html/rfc7252#section-3.1
    p += pkt->hdr.tkl;
    for (i=0;i<pkt->numopts;i++)
    {
        uint32_t optDelta;
        uint8_t len, delta = 0;

        if (((size_t)(p-buf)) > *buflen)
            return COAP_ERR_BUFFER_TOO_SMALL;
        optDelta = pkt->opts[i].num - running_delta;
        coap_option_nibble(optDelta, &delta);
        coap_option_nibble((uint32_t)pkt->opts[i].buf.len, &len);

        *p++ = (0xFF & (delta << 4 | len));
        // nibble 13: one extended byte carrying (value - 13)
        // nibble 14: two extended bytes carrying (value - 269), big-endian
        if (delta == 13)
        {
            *p++ = (optDelta - 13);
        }
        else
        if (delta == 14)
        {
            *p++ = ((optDelta-269) >> 8);
            *p++ = (0xFF & (optDelta-269));
        }
        if (len == 13)
        {
            *p++ = (pkt->opts[i].buf.len - 13);
        }
        else
        if (len == 14)
        {
            // BUG FIX: the high byte must be taken from (len - 269), as in
            // the extended-delta branch above; the old code shifted the raw
            // length, corrupting the extended length field on the wire.
            *p++ = ((pkt->opts[i].buf.len - 269) >> 8);
            *p++ = (0xFF & (pkt->opts[i].buf.len-269));
        }

        memcpy(p, pkt->opts[i].buf.p, pkt->opts[i].buf.len);
        p += pkt->opts[i].buf.len;
        running_delta = pkt->opts[i].num;
    }
    opts_len = (p - buf) - 4;   // number of bytes used by options

    // append the payload, preceded by the 0xFF marker, if there is one
    if (pkt->payload.len > 0)
    {
        if (*buflen < 4 + 1 + pkt->payload.len + opts_len)
            return COAP_ERR_BUFFER_TOO_SMALL;
        buf[4 + opts_len] = 0xFF;  // payload marker
        memcpy(buf+5 + opts_len, pkt->payload.p, pkt->payload.len);
        *buflen = opts_len + 5 + pkt->payload.len;
    }
    else
        *buflen = opts_len + 4;
    return 0;
}
/* Compute the 4-bit delta/length nibble for an option value: the value
   itself when < 13, 13 for one extended byte, 14 for two extended bytes.
   NOTE(review): values above 0xFFFF+269 leave *nibble untouched (nibble 15
   is reserved for the payload marker) — callers must pre-initialize the
   nibble, as coap_build does. */
void ICACHE_FLASH_ATTR coap_option_nibble(uint32_t value, uint8_t *nibble)
{
    if (value<13)
    {
        *nibble = (0xFF & value);
    }
    else
    if (value<=0xFF+13)
    {
        *nibble = 13;
    } else if (value<=0xFFFF+269)
    {
        *nibble = 14;
    }
}
/* Fill pkt with an ACK (piggybacked) response: echoes the message id and
   token, sets the response code, and attaches a Content-Format option
   (encoded into the caller-provided scratch buffer) plus the payload.
   The payload bytes are referenced, not copied — `content` must outlive
   the packet. */
int ICACHE_FLASH_ATTR coap_make_response(coap_rw_buffer_t *scratch, coap_packet_t *pkt, const uint8_t *content, size_t content_len, uint8_t msgid_hi, uint8_t msgid_lo, const coap_buffer_t* tok, coap_responsecode_t rspcode, coap_content_type_t content_type)
{
    pkt->hdr.ver = 0x01;
    pkt->hdr.t = COAP_TYPE_ACK;
    pkt->hdr.tkl = 0;
    pkt->hdr.code = rspcode;
    pkt->hdr.id[0] = msgid_hi;
    pkt->hdr.id[1] = msgid_lo;
    pkt->numopts = 1;

    // need token in response
    if (tok) {
        pkt->hdr.tkl = tok->len;
        pkt->tok = *tok;
    }

    // safe because 1 < MAXOPT
    pkt->opts[0].num = COAP_OPTION_CONTENT_FORMAT;
    pkt->opts[0].buf.p = scratch->p;
    if (scratch->len < 2)
        return COAP_ERR_BUFFER_TOO_SMALL;
    // Content-Format is a 2-byte big-endian integer stored in scratch.
    scratch->p[0] = ((uint16_t)content_type & 0xFF00) >> 8;
    scratch->p[1] = ((uint16_t)content_type & 0x00FF);
    pkt->opts[0].buf.len = 2;
    pkt->payload.p = content;
    pkt->payload.len = content_len;
    return 0;
}
// FIXME, if this looked in the table at the path before the method then
// it could more easily return 405 errors
// FIXME, if this looked in the table at the path before the method then
// it could more easily return 405 errors
/* Dispatch a parsed request against the endpoint table: the first endpoint
   whose method matches and whose Uri-Path segments all match (count,
   length and bytes) handles the request. Falls back to a 4.04 Not Found
   response. */
int ICACHE_FLASH_ATTR coap_handle_req(coap_rw_buffer_t *scratch, const coap_packet_t *inpkt, coap_packet_t *outpkt)
{
    const coap_option_t *opt;
    uint8_t count;
    int i;
    const coap_endpoint_t *ep = endpoints;

    while(NULL != ep->handler)
    {
        if (ep->method != inpkt->hdr.code)
            goto next;
        if (NULL != (opt = coap_findOptions(inpkt, COAP_OPTION_URI_PATH, &count)))
        {
            if (count != ep->path->count)
                goto next;
            for (i=0;i<count;i++)
            {
                if (opt[i].buf.len != strlen(ep->path->elems[i]))
                    goto next;
                if (0 != memcmp(ep->path->elems[i], opt[i].buf.p, opt[i].buf.len))
                    goto next;
            }
            // match!
            return ep->handler(scratch, inpkt, outpkt, inpkt->hdr.id[0], inpkt->hdr.id[1]);
        }
next:
        ep++;
    }

    coap_make_response(scratch, outpkt, NULL, 0, inpkt->hdr.id[0], inpkt->hdr.id[1], &inpkt->tok, COAP_RSPCODE_NOT_FOUND, COAP_CONTENTTYPE_NONE);

    return 0;
}
/* One-time library setup hook; intentionally empty in this port. */
void coap_setup(void)
{
}
| AccretionD/ESP8266_freertos_coap | app/user/coap.c | C | mit | 12,020 |
#include <stdio.h>
#include <stdlib.h> /* exit, free */
#include <string.h> /* for manipulating filename */
#include "defines.h" /* type definitions and macros for flags and MAX limits */
#include "structs.h" /* structures used (needs defines.h) */
/* Shared program state: the parser (input.c) fills these and the
   generator (output.c) consumes them. */
Category* cats[ MAX_TOT_CATS ]; /* array of all Categories */
Property* props[ MAX_TOT_PROPS ]; /* array of all Properties */
short tot_cats = 0, /* total Categories */
tot_props = 0, /* total Properties */
max_cat_name = 0; /* used to format the output to look nice */
char *in_file = NULL, /* name of file for input */
*out_file = NULL; /* name of file for ouput (if any) */
/* **** debug **** */
#if DEBUG
int main_vars_size = sizeof( cats ) + sizeof( props ) + sizeof( short ) * 3 + sizeof( char ) * 2;
#endif
/* local functions */
Flag parse_args( int num_args, char* args[] );
void print_man( char* prog_name );
int cleanup( void );
int free_expr( Expression* expr );
/* input.c functions */
void parse_file( void );
/* output.c functions */
int generator( Flag flags );
/* **** debug **** */
#if DEBUG
void debug_info( void );
int vars_size( void );
#endif
/**
 * Entry point: parse the command line, read the specification file, run
 * the test-frame generator, and report/clean up. Returns EXIT_SUCCESS or
 * EXIT_FAILURE when required file names are missing.
 */
int main( int argc, char* argv[] )
{
    Flag flags; /* program flags */
    int num_frames;
    char filename[ 30 ],
         answer[ 5 ]; /* user response */

    if ( argc == 1 )
    {
        printf( "\nUSAGE: %s [ --manpage ] [ -cs ] input_file [ -o output_file ]\n\n", argv[0] );
        return EXIT_SUCCESS;
    }
    else
        flags = parse_args( argc, argv );

    if ( in_file == NULL )
    {
        printf( "\nNo input file provided.\nQuitting\n\n" );
        return EXIT_FAILURE;
    }

    if ( flags & STD_OUTPUT )
        out_file = "the standard output";
    else if ( flags & OUTPUT_FILE )
    {
        if ( out_file == NULL )
        {
            printf( "\nNo output file provided.\nQuitting\n\n" );
            return EXIT_FAILURE;
        }
    }
    else
    {
        /* Default output name is input name + ".tsl".
           BUG FIX: snprintf bounds the write — the old strcpy/strcat pair
           overflowed filename[] for input names longer than 25 chars. */
        snprintf( filename, sizeof( filename ), "%s.tsl", in_file );
        out_file = filename;
    }

    parse_file();

    /* **** debug **** */
#if DEBUG
    debug_info();
#endif

    num_frames = generator( flags );
    if ( flags & COUNT_ONLY )
    {
        printf( "\n\t%d test frames generated\n\n", num_frames );
        printf( "Write test frames to %s (y/n)? ", out_file );
        /* BUG FIX: bound the read — answer[] holds at most 4 chars + NUL;
           a plain "%s" could overflow it. Treat a read failure as "no". */
        if ( scanf( "%4s", answer ) != 1 )
            answer[0] = 'n';
        if ( answer[0] == 'y' || answer[0] == 'Y' )
            printf( "\n\t%d test frames written to %s\n\n", generator( flags & ~COUNT_ONLY ), out_file );
    }
    else
        printf( "\n\t%d test frames generated and written to %s\n\n", num_frames, out_file );

    /* **** debug **** */
#if DEBUG
    printf( "program base storage = %d bytes\n", vars_size() );
    printf( "program total storage = %d bytes\n\n", cleanup() + vars_size() );
#else
    cleanup();
#endif

    return EXIT_SUCCESS;
}
/* Parse the command line arguments and set flags accordingly.
   Side effects: sets the in_file/out_file globals; "--manpage" prints the
   man page and exits. "-o" consumes the following argument as the output
   file name and ends parsing (original behavior). */
Flag parse_args( int num_args, char* args[] )
{
    Flag flags = 0;
    short i;
    size_t j, len;

    for ( i = 1; i < num_args; i++ )
    {
        if ( strcmp( args[i], "--manpage" ) == 0 )
        {
            print_man( args[0] );
            exit( EXIT_SUCCESS );
        }

        if ( *args[i] == '-' )
        {
            /* strlen hoisted out of the loop condition; it is invariant. */
            len = strlen( args[i] );
            for ( j = 1; j < len; j++ )
            {
                switch ( args[i][j] )
                {
                    case 'c':
                        flags = flags | COUNT_ONLY;
                        break;
                    case 's':
                        flags = flags | STD_OUTPUT;
                        break;
                    case 'o':
                        /* -s takes precedence over -o; args[i+1] may be
                           NULL here — main() rejects that case. */
                        if ( !( flags & STD_OUTPUT ) )
                        {
                            flags = flags | OUTPUT_FILE;
                            out_file = args[ i + 1 ];
                        }
                        return flags;
                }
            }
        }
        else
            in_file = args[i];
    }
    return flags;
}
/* Print the tsl manpage to standard output. The prog_name parameter is
   accepted for interface stability but is not used in the page text. */
void print_man( char* prog_name )
{
    (void) prog_name; /* unused */
    fputs(
        "\nNAME\n\ttsl - generate test frames from a specification file\n"
        "\nSYNOPSIS\n\ttsl [ --manpage ] [ -cs ] input_file [ -o output_file ]\n"
        "\nDESCRIPTION\n\tThe TSL utility generates test frames from a specification file\n"
        "\twritten in the extended Test Specification Language. By default\n"
        "\tit writes the test frames to a new file created by appending a\n"
        "\t'.tsl' extension to the input_file's name. Options can be used\n"
        "\tto modify the output.\n"
        "\nOPTIONS\n\tThe following options are supported:\n"
        "\n\t--manpage\n\t\tPrint this man page.\n"
        "\n\t-c\tReport the number of test frames generated, but don't\n"
        "\t\twrite them to the output. After the number of frames is\n"
        "\t\treported you will be given the option of writing them\n"
        "\t\tto the output.\n"
        "\n\t-s\tOutput is the standard output.\n"
        "\n\t-o output_file\n\t\tOutput is the file output_file unless the -s option is used.\n\n",
        stdout );
}
/* Free the memory allocated by the program and return how much.
   Walks every Category, its Choices (including any attached if-Expression
   trees), and every Property. The byte count feeds the DEBUG storage
   report in main(). */
int cleanup( void )
{
    Choice* curr_choice;
    int total_size = 0;
    short i, j;

    for ( i = 0; i < tot_cats; i++ )
    {
        total_size += sizeof( Category );
        for ( j = 0; j < cats[i] -> num_choices; j++ )
        {
            total_size += sizeof( Choice );
            curr_choice = cats[i] -> choices[j];
            if ( curr_choice -> flags & IF_EXPR )
                total_size += free_expr( curr_choice -> if_expr );
            free( curr_choice );
        }
        free( cats[i] );
    }

    for ( i = 0; i < tot_props; i++ )
    {
        total_size += sizeof( Property );
        free( props[i] );
    }

    return total_size;
}
/* Free all the memory associated with an Expression (recursive) and return
 * how many bytes were released.  A NULL expression is tolerated and counts
 * as zero bytes (the original dereferenced NULL unconditionally). */
int free_expr( Expression* expr )
{
    int expr_size;
    if ( expr == NULL )
        return 0;
    expr_size = sizeof( Expression );
    if ( expr -> flags & EXPR_A )
        expr_size += free_expr( expr -> exprA );
    if ( expr -> flags & EXPR_B )
        expr_size += free_expr( expr -> exprB );
    free( expr );
    return expr_size;
}
| pastoref/VendingMachine | support/categoryPartitionTool-TSL/main.c | C | mit | 6,585 |
/* Aranea
* Copyright (c) 2011-2012, Quoc-Viet Nguyen
* See LICENSE file for copyright and license details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <aranea/aranea.h>
/* Length of the CGI file-name extension, excluding the terminating NUL */
#define CGI_EXT_LEN_ ((int)sizeof(CGI_EXT) - 1)
/** Buffer for CGI environment variables (shared global scratch buffer) */
#define CGI_BUFF g_buff
/* Return 1 when the file name ends with the CGI extension, 0 otherwise.
 * Names no longer than the extension itself never match. */
int cgi_hit(const char *name, const int len) {
    if (len <= CGI_EXT_LEN_) {
        return 0;
    }
    return (memcmp(name + (len - CGI_EXT_LEN_), CGI_EXT, CGI_EXT_LEN_) == 0)
        ? 1 : 0;
}
/** Check that path names a regular, executable file.
 * On failure the HTTP status is recorded in client->response.status_code
 * and -1 is returned; 0 means the file may be executed.
 */
static
int cgi_is_executable(const char *path, struct client_t *client) {
    struct stat st;

    /* not executable by this process -> 403 */
    if (access(path, X_OK) != 0) {
        client->response.status_code = HTTP_STATUS_FORBIDDEN;
        return -1;
    }
    /* cannot inspect the path at all -> 500 */
    if (stat(path, &st) == -1) {
        A_ERR("stat: %s", strerror(errno));
        client->response.status_code = HTTP_STATUS_SERVERERROR;
        return -1;
    }
    /* directories are never executed -> 403 */
    if (S_ISDIR(st.st_mode)) {
        client->response.status_code = HTTP_STATUS_FORBIDDEN;
        return -1;
    }
    return 0;
}
/* Append one "NAME=value" string to the environment array.
 * env/cnt advance by one entry, buf advances past the formatted string
 * (plus its NUL) inside CGI_BUFF.
 * NOTE(review): snprintf returns the would-be length on truncation, so a
 * truncated entry can push buf past the end of CGI_BUFF for the NEXT call;
 * the len > 0 guard then stops further writes, but the last entry may be
 * truncated — confirm this is acceptable for the deployment. */
#define CGI_ADD_ENV_(env, cnt, buf, ...) \
do { \
*env = buf; \
len = sizeof(CGI_BUFF) - (buf - CGI_BUFF); \
if (len > 0) { \
len = snprintf(buf, len, __VA_ARGS__); \
buf += len + 1; /* skip NULL */ \
++env; \
++cnt; \
} \
} while (0)
/** Generate CGI environment from HTTP request.
 * Values are saved in g_buff (g_cgienv); env[] receives pointers into that
 * buffer, is NULL-terminated, and the number of entries is returned.
 * Only headers actually present in the request produce variables.
 */
static
int cgi_gen_env(const struct request_t *req, char **env) {
    int cnt, len;
    char *buf;
    cnt = 0;
    buf = CGI_BUFF;
#ifdef CGI_DOCUMENT_ROOT
    CGI_ADD_ENV_(env, cnt, buf, "DOCUMENT_ROOT=%s", g_config.root);
#endif
#ifdef CGI_REQUEST_METHOD
    CGI_ADD_ENV_(env, cnt, buf, "REQUEST_METHOD=%s", req->method);
#endif
#ifdef CGI_REQUEST_URI
    CGI_ADD_ENV_(env, cnt, buf, "REQUEST_URI=%s", req->url);
#endif
    if (req->query_string) {
        CGI_ADD_ENV_(env, cnt, buf, "QUERY_STRING=%s", req->query_string);
    }
    if (req->header[HEADER_CONTENTTYPE]) {
        CGI_ADD_ENV_(env, cnt, buf, "CONTENT_TYPE=%s", req->header[HEADER_CONTENTTYPE]);
    }
    if (req->header[HEADER_CONTENTLENGTH]) {
        CGI_ADD_ENV_(env, cnt, buf, "CONTENT_LENGTH=%s", req->header[HEADER_CONTENTLENGTH]);
    }
#ifdef CGI_HTTP_COOKIE
    if (req->header[HEADER_COOKIE]) {
        CGI_ADD_ENV_(env, cnt, buf, "HTTP_COOKIE=%s", req->header[HEADER_COOKIE]);
    }
#endif
    /* terminate the vector for execve() */
    *env = NULL;
    return cnt;
}
/* Child-process primitives: prefer vfork()/_exit() when available (cheaper,
 * no address-space copy before the exec below); fall back to fork()/exit().
 * _exit() is required after vfork so the child does not flush the parent's
 * stdio buffers. */
#if HAVE_VFORK == 1
# define FORK_() vfork()
# define EXIT_(x) _exit(x)
#else
# define FORK_() fork()
# define EXIT_(x) exit(x)
#endif /* HAVE_VFORK */
/** Execute file as a CGI program.
 * HTTP error code is set to client->response.status_code.
 * The child sends a minimal header, wires the socket to stdin (POST only)
 * and stdout, silences stderr, then execve()s the script.  The parent just
 * marks the client finished.
 * NOTE(review): statement order here matters under vfork() (child shares the
 * parent's address space until exec) — do not reorder. */
static
int cgi_exec(const char *path, struct client_t *client) {
    char *argv[2];
    char *envp[MAX_CGIENV_ITEM];
    pid_t pid;
    int newio;
    /* set socket back to blocking (the CGI child expects ordinary I/O) */
    newio = fcntl(client->remote_fd, F_GETFL, NULL);
    if (newio == -1
            || fcntl(client->remote_fd, F_SETFL, newio & (~O_NONBLOCK)) == -1) {
        A_ERR("fcntl: F_SETFL O_NONBLOCK %s", strerror(errno));
        client->response.status_code = HTTP_STATUS_SERVERERROR;
        return -1;
    }
    pid = FORK_();
    if (pid < 0) {
        client->response.status_code = HTTP_STATUS_SERVERERROR;
        return -1;
    }
    if (pid == 0) { /* child */
        /* Generate CGI parameters before touching to the buffer */
        cgi_gen_env(&client->request, envp);
        /* Send minimal header */
        client->response.status_code = HTTP_STATUS_OK;
        client->data_length = http_gen_header(&client->response, client->data,
                sizeof(client->data), 0);
        if (send(client->remote_fd, client->data, client->data_length, 0) < 0) {
            EXIT_(1);
        }
        /* Tie CGI's stdin to the socket */
        if (client->flags & CLIENT_FLAG_POST) {
            if (dup2(client->remote_fd, STDIN_FILENO) < 0) {
                EXIT_(1);
            }
        }
        /* Tie CGI's stdout to the socket */
        if (dup2(client->remote_fd, STDOUT_FILENO) < 0) {
            EXIT_(1);
        }
        /* close unused FDs */
        server_close_fds();
        /* No error log */
        newio = open("/dev/null", O_WRONLY);
        if (newio != STDERR_FILENO) {
            dup2(newio, STDERR_FILENO);
            close(newio);
        }
        /* Execute cgi script */
        argv[0] = (char *)path;
        argv[1] = NULL;
        execve(path, argv, envp);
        EXIT_(1); /* exec error */
    }
    /* parent */
    client->state = STATE_NONE; /* Remove this client */
    return 0;
}
/* Entry point for CGI handling: validate the target, answer HEAD requests
 * with a bare header, otherwise execute the script.
 * Returns 0 on success, -1 with status_code set on failure. */
int cgi_process(struct client_t *client, const char *path) {
    if (cgi_is_executable(path, client) != 0) {
        return -1;
    }
    if ((client->flags & CLIENT_FLAG_HEADERONLY) == 0) {
        return cgi_exec(path, client);
    }
    /* HEAD request: send the header only, never run the script */
    client->response.status_code = HTTP_STATUS_OK;
    client->data_length = http_gen_header(&client->response, client->data,
            sizeof(client->data), HTTP_FLAG_END);
    client->state = STATE_SEND_HEADER;
    return 0;
}
/* vim: set ts=4 sw=4 expandtab: */
| nqv/aranea | src/cgi.c | C | mit | 5,878 |
#include "minunit.h"
#include <lcthw/darray_algos.h>
#include <stdlib.h>
#include <time.h>
#include <limits.h>
/* qsort-style comparator for pointers-to-pointers-to-int.
 * Uses explicit comparisons instead of subtraction so operands near
 * INT_MIN/INT_MAX cannot overflow (the sign of **a - **b is wrong then).
 * Callers only rely on the sign of the result. */
static inline int intcmp(int **a, int **b) {
    return (**a > **b) - (**a < **b);
}
/* qsort-style comparator for pointers to int.
 * Comparison form avoids the signed-overflow hazard of *a - *b;
 * callers only inspect the sign of the result. */
static inline int sintcmp(int *a, int *b) {
    return (*a > *b) - (*a < *b);
}
/* Fill array with n freshly-allocated random ints.
 * Returns 0 on success, -1 if a push fails (via the check/error idiom).
 * NOTE(review): srand(time(NULL)) reseeds on every call — two calls within
 * the same second produce identical sequences; confirm that is intended. */
int make_random(DArray *array, size_t n) {
    srand(time(NULL));
    size_t i = 0;
    for(i = 0; i < n; i++) {
        int *random = DArray_new(array);  /* element-sized heap slot */
        *random = rand();
        check(DArray_push(array, random) == 0, "Inserting random values failed.");
    }
    return 0;
error:
    return -1;
}
/* Return 1 when every adjacent pair of elements is in cmp order,
 * 0 as soon as one out-of-order pair is found. */
int is_sorted(DArray *array, DArray_compare cmp) {
    int idx;
    int last = DArray_count(array) - 1;
    for(idx = 0; idx < last; idx++) {
        void *lhs = DArray_get(array, idx);
        void *rhs = DArray_get(array, idx + 1);
        if(cmp(lhs, rhs) > 0) {
            return 0;
        }
    }
    return 1;
}
/* Run one named sort function over 20 random ints and verify the result.
 * Returns NULL on success or a failure message (minunit convention). */
char *run_sort_test(int (*func)(DArray *, DArray_compare), const char *name) {
    DArray *nums = DArray_create(sizeof(int *), 20);
    int rc = make_random(nums, 20);
    mu_assert(rc == 0, "Randomization failed.");
    /* 20 random values are vanishingly unlikely to arrive sorted */
    mu_assert(!is_sorted(nums, (DArray_compare)sintcmp), "Numbers should start not sorted.");
    debug("--- Testing %s sorting algorithm", name);
    rc = func(nums, (DArray_compare)intcmp);
    mu_assert(rc == 0, "Sort failed.");
    mu_assert(is_sorted(nums, (DArray_compare)sintcmp), "Sort didn't sort properly.");
    DArray_clear_destroy(nums);
    return NULL;
}
/* Correctness tests: one thin wrapper per sorting algorithm. */
char *test_qsort() {
    return run_sort_test(DArray_qsort, "qsort");
}
char *test_heapsort() {
    return run_sort_test(DArray_heapsort, "heapsort");
}
char *test_mergesort() {
    return run_sort_test(DArray_mergesort, "mergesort");
}
/* Time one sort over 25 runs of N random elements and report the fastest.
 * Each run sorts a fresh shallow copy of the same source array so every
 * algorithm sees identical data.  Returns NULL or a failure message.
 * NOTE(review): fastest is clock_t initialized from LONG_MAX — confirm
 * clock_t is at least as wide as long on the target platform. */
char *speed_sort_test(int (*func)(DArray *, DArray_compare), const char *name) {
    size_t N = 10000;
    debug("--- Testing the speed of %s", name);
    DArray *source = DArray_create(sizeof(void *), N+1);
    clock_t fastest = LONG_MAX;
    int rc = make_random(source, N);
    mu_assert(rc == 0, "Randomizing the source DArray failed.");
    int i = 0;
    for(i = 0; i < 25; i++) {
        /* shallow copy: test shares the int pointers owned by source */
        DArray *test = DArray_create(sizeof(int *), N+1);
        rc = DArray_copy(source, test);
        mu_assert(rc == 0, "Copy failed.");
        clock_t elapsed = -clock();
        rc = func(test, (DArray_compare)intcmp);
        elapsed += clock();
        mu_assert(rc == 0, "Sort failed.");
        mu_assert(is_sorted(test, (DArray_compare)sintcmp), "Sort didn't sort properly.");
        if(elapsed < fastest) fastest = elapsed;
        /* destroy the copy only; elements still belong to source */
        DArray_destroy(test);
    }
    debug("Fastest time for sort: %s, size %zu: %f", name, N, ((float)fastest)/CLOCKS_PER_SEC);
    DArray_clear_destroy(source);
    return NULL;
}
/* Benchmark tests: one thin wrapper per sorting algorithm. */
char *test_speed_qsort() {
    return speed_sort_test(DArray_qsort, "quicksort");
}
char *test_speed_mergesort() {
    return speed_sort_test(DArray_mergesort, "mergesort");
}
char *test_speed_heapsort() {
    return speed_sort_test(DArray_heapsort, "heapsort");
}
/* Sanity-check the comparator on a greater / equal / less triple. */
char *test_cmp() {
    DArray *scratch = DArray_create(sizeof(int), 10);
    int *lhs = DArray_new(scratch);
    int *rhs = DArray_new(scratch);

    *lhs = 100;
    *rhs = 20;
    mu_assert(sintcmp(lhs, rhs) > 0, "Comparison fails on 100, 20.");

    *lhs = 50;
    *rhs = 50;
    mu_assert(sintcmp(lhs, rhs) == 0, "Comparison fails on 50, 50.");

    *lhs = 30;
    *rhs = 60;
    mu_assert(sintcmp(lhs, rhs) < 0, "Comparison fails on 30, 60.");

    DArray_clear_destroy(scratch);
    return NULL;
}
/* minunit test registry: order matters only for log readability. */
char *all_tests() {
    mu_suite_start();
    mu_run_test(test_cmp);
    mu_run_test(test_qsort);
    mu_run_test(test_heapsort);
    mu_run_test(test_mergesort);
    mu_run_test(test_speed_qsort);
    mu_run_test(test_speed_mergesort);
    mu_run_test(test_speed_heapsort);
    return NULL;
}
/* expands to main(), runs the suite and reports results */
RUN_TESTS(all_tests);
| reem/LCTHW-Lib | tests/darray_algos_tests.c | C | mit | 3,747 |
/*
* (C) Copyright 2012 SAMSUNG Electronics
* Jaehoon Chung <jh80.chung@samsung.com>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <sdhci.h>
#include <fdtdec.h>
#include <linux/libfdt.h>
#include <asm/gpio.h>
#include <asm/arch/mmc.h>
#include <asm/arch/clk.h>
#include <errno.h>
#include <asm/arch/pinmux.h>
#ifdef CONFIG_DM_MMC
/* Per-device platform data for the driver-model path: the mmc_config and
 * mmc instance live here instead of being allocated separately. */
struct s5p_sdhci_plat {
	struct mmc_config cfg;
	struct mmc mmc;
};
DECLARE_GLOBAL_DATA_PTR;
#endif
/* Host controller name reported through sdhci_host->name (non-const
 * because the sdhci core stores it in a plain char *). */
static char *S5P_NAME = "SAMSUNG SDHCI";
/* Program the Samsung-specific CONTROL2/3/4 registers: pad drive strength,
 * feedback-clock sampling and base-clock selection.  Called by the sdhci
 * core whenever the bus is (re)configured. */
static void s5p_sdhci_set_control_reg(struct sdhci_host *host)
{
	unsigned long val, ctrl;
	/*
	 * SELCLKPADDS[17:16]
	 * 00 = 2mA
	 * 01 = 4mA
	 * 10 = 7mA
	 * 11 = 9mA
	 */
	sdhci_writel(host, SDHCI_CTRL4_DRIVE_MASK(0x3), SDHCI_CONTROL4);
	/* keep only the base-clock field, then enable feedback clock etc. */
	val = sdhci_readl(host, SDHCI_CONTROL2);
	val &= SDHCI_CTRL2_SELBASECLK_MASK(3);
	val |=	SDHCI_CTRL2_ENSTAASYNCCLR |
		SDHCI_CTRL2_ENCMDCNFMSK |
		SDHCI_CTRL2_ENFBCLKRX |
		SDHCI_CTRL2_ENCLKOUTHOLD;
	sdhci_writel(host, val, SDHCI_CONTROL2);
	/*
	 * FCSEL3[31] FCSEL2[23] FCSEL1[15] FCSEL0[7]
	 * FCSel[1:0] : Rx Feedback Clock Delay Control
	 * Inverter delay means10ns delay if SDCLK 50MHz setting
	 * 01 = Delay1 (basic delay)
	 * 11 = Delay2 (basic delay + 2ns)
	 * 00 = Delay3 (inverter delay)
	 * 10 = Delay4 (inverter delay + 2ns)
	 */
	val = SDHCI_CTRL3_FCSEL0 | SDHCI_CTRL3_FCSEL1;
	sdhci_writel(host, val, SDHCI_CONTROL3);
	/*
	 * SELBASECLK[5:4]
	 * 00/01 = HCLK
	 * 10 = EPLL
	 * 11 = XTI or XEXTCLK
	 */
	ctrl = sdhci_readl(host, SDHCI_CONTROL2);
	ctrl &= ~SDHCI_CTRL2_SELBASECLK_MASK(0x3);
	ctrl |= SDHCI_CTRL2_SELBASECLK_MASK(0x2);	/* select EPLL */
	sdhci_writel(host, ctrl, SDHCI_CONTROL2);
}
/* sdhci_ops.set_clock hook: program the controller's clock divider. */
static void s5p_set_clock(struct sdhci_host *host, u32 div)
{
	/* ToDo : Use the Clock Framework */
	set_mmc_clk(host->index, div);
}
/* Platform hooks handed to the generic sdhci core. */
static const struct sdhci_ops s5p_sdhci_ops = {
	.set_clock	= &s5p_set_clock,
	.set_control_reg = &s5p_sdhci_set_control_reg,
};
/* Fill in the quirks, capabilities and ops common to every S5P/Exynos
 * SDHCI instance; in the legacy (non-BLK) build also register the host
 * with a 400 kHz minimum clock. */
static int s5p_sdhci_core_init(struct sdhci_host *host)
{
	host->name = S5P_NAME;

	host->quirks = SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_VOLTAGE |
		SDHCI_QUIRK_32BIT_DMA_ADDR |
		SDHCI_QUIRK_WAIT_SEND_CMD | SDHCI_QUIRK_USE_WIDE8;
	host->max_clk = 52000000;
	host->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
	host->ops = &s5p_sdhci_ops;

	if (host->bus_width == 8)
		host->host_caps |= MMC_MODE_8BIT;

#ifndef CONFIG_BLK
	return add_sdhci(host, 0, 400000);
#else
	return 0;
#endif
}
/* Legacy (non-DT) entry point: allocate and register one SDHCI host for
 * the controller at @regbase.  Returns 0 on success, -ENOMEM when the
 * host structure cannot be allocated, or the core-init error code. */
int s5p_sdhci_init(u32 regbase, int index, int bus_width)
{
	struct sdhci_host *host = calloc(1, sizeof *host);

	if (!host) {
		/* was "sdhci__host" — typo in the original message */
		printf("sdhci_host allocation fail!\n");
		return -ENOMEM;
	}
	host->ioaddr = (void *)regbase;
	host->index = index;
	host->bus_width = bus_width;

	return s5p_sdhci_core_init(host);
}
#if CONFIG_IS_ENABLED(OF_CONTROL)
/* One statically-allocated host slot per controller described in the DT. */
struct sdhci_host sdhci_host[SDHCI_MAX_HOSTS];

/* Configure pinmux, power and card-detect GPIOs for one host, then run the
 * common core init.  Returns 0, -ENODEV when card-detect says no card, or
 * a pinmux/core error code. */
static int do_sdhci_init(struct sdhci_host *host)
{
	int dev_id, flag, ret;

	flag = host->bus_width == 8 ? PINMUX_FLAG_8BIT_MODE : PINMUX_FLAG_NONE;
	dev_id = host->index + PERIPH_ID_SDMMC0;

	ret = exynos_pinmux_config(dev_id, flag);
	if (ret) {
		printf("external SD not configured\n");
		return ret;
	}

	if (dm_gpio_is_valid(&host->pwr_gpio)) {
		dm_gpio_set_value(&host->pwr_gpio, 1);
		/* re-run pinmux after powering the slot */
		ret = exynos_pinmux_config(dev_id, flag);
		if (ret) {
			debug("MMC not configured\n");
			return ret;
		}
	}

	if (dm_gpio_is_valid(&host->cd_gpio)) {
		/* active-high card-detect: non-zero means slot is empty */
		ret = dm_gpio_get_value(&host->cd_gpio);
		if (ret) {
			debug("no SD card detected (%d)\n", ret);
			return -ENODEV;
		}
	}

	return s5p_sdhci_core_init(host);
}
/* Decode one controller node from the device tree into @host: peripheral
 * id, bus width, register base and the optional power / card-detect GPIOs.
 * Returns 0 or -EINVAL on a malformed node.
 * NOTE(review): fdtdec_get_addr() reports failure with FDT_ADDR_T_NONE
 * (all ones), not 0 — the !base check below may not catch it; confirm. */
static int sdhci_get_config(const void *blob, int node, struct sdhci_host *host)
{
	int bus_width, dev_id;
	unsigned int base;

	/* Get device id */
	dev_id = pinmux_decode_periph_id(blob, node);
	if (dev_id < PERIPH_ID_SDMMC0 || dev_id > PERIPH_ID_SDMMC3) {
		debug("MMC: Can't get device id\n");
		return -EINVAL;
	}
	host->index = dev_id - PERIPH_ID_SDMMC0;

	/* Get bus width */
	bus_width = fdtdec_get_int(blob, node, "samsung,bus-width", 0);
	if (bus_width <= 0) {
		debug("MMC: Can't get bus-width\n");
		return -EINVAL;
	}
	host->bus_width = bus_width;

	/* Get the base address from the device node */
	base = fdtdec_get_addr(blob, node, "reg");
	if (!base) {
		debug("MMC: Can't get base address\n");
		return -EINVAL;
	}
	host->ioaddr = (void *)base;

	/* GPIOs are optional; failures here are ignored deliberately */
	gpio_request_by_name_nodev(offset_to_ofnode(node), "pwr-gpios", 0,
				   &host->pwr_gpio, GPIOD_IS_OUT);
	gpio_request_by_name_nodev(offset_to_ofnode(node), "cd-gpios", 0,
				   &host->cd_gpio, GPIOD_IS_IN);

	return 0;
}
/* Decode and initialize every controller node found in the device tree.
 * Individual failures are tolerated; only when every node fails is -1
 * returned, otherwise 0. */
static int process_nodes(const void *blob, int node_list[], int count)
{
	struct sdhci_host *host;
	int idx, offset, ret;
	int failed = 0;

	debug("%s: count = %d\n", __func__, count);

	/* build sdhci_host[] for each controller */
	for (idx = 0; idx < count; idx++) {
		offset = node_list[idx];
		if (offset <= 0)
			continue;
		host = &sdhci_host[idx];
		ret = sdhci_get_config(blob, offset, host);
		if (ret) {
			printf("%s: failed to decode dev %d (%d)\n", __func__, idx, ret);
			failed++;
			continue;
		}
		ret = do_sdhci_init(host);
		if (ret && ret != -ENODEV) {
			printf("%s: failed to initialize dev %d (%d)\n", __func__, idx, ret);
			failed++;
		}
	}

	/* we only consider it an error when all nodes fail */
	if (failed == count)
		return -1;
	return 0;
}
/* Find all "mmc" aliases in the device tree and bring each controller up. */
int exynos_mmc_init(const void *blob)
{
	int node_list[SDHCI_MAX_HOSTS];
	int count = fdtdec_find_aliases_for_id(blob, "mmc",
			COMPAT_SAMSUNG_EXYNOS_MMC, node_list,
			SDHCI_MAX_HOSTS);

	return process_nodes(blob, node_list, count);
}
#endif
#ifdef CONFIG_DM_MMC
/* Driver-model probe: decode the DT node, run the platform init, build the
 * mmc_config and hand over to the generic sdhci probe. */
static int s5p_sdhci_probe(struct udevice *dev)
{
	struct s5p_sdhci_plat *plat = dev_get_platdata(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	struct sdhci_host *host = dev_get_priv(dev);
	int ret;

	ret = sdhci_get_config(gd->fdt_blob, dev_of_offset(dev), host);
	if (ret)
		return ret;

	ret = do_sdhci_init(host);
	if (ret)
		return ret;

	ret = sdhci_setup_cfg(&plat->cfg, host, 0, 400000);
	if (ret)
		return ret;

	/* wire host <-> mmc <-> udevice together for the mmc uclass */
	host->mmc = &plat->mmc;
	host->mmc->priv = host;
	host->mmc->dev = dev;
	upriv->mmc = host->mmc;

	return sdhci_probe(dev);
}
/* Driver-model bind: register the block device for this controller. */
static int s5p_sdhci_bind(struct udevice *dev)
{
	struct s5p_sdhci_plat *plat = dev_get_platdata(dev);

	/* sdhci_bind() already returns 0 on success; pass it through */
	return sdhci_bind(dev, &plat->mmc, &plat->cfg);
}
/* Device-tree compatibles served by this driver. */
static const struct udevice_id s5p_sdhci_ids[] = {
	{ .compatible = "samsung,exynos4412-sdhci"},
	{ }
};

/* Driver-model registration for the S5P/Exynos SDHCI controller. */
U_BOOT_DRIVER(s5p_sdhci_drv) = {
	.name		= "s5p_sdhci",
	.id		= UCLASS_MMC,
	.of_match	= s5p_sdhci_ids,
	.bind		= s5p_sdhci_bind,
	.ops		= &sdhci_ops,
	.probe		= s5p_sdhci_probe,
	.priv_auto_alloc_size = sizeof(struct sdhci_host),
	.platdata_auto_alloc_size = sizeof(struct s5p_sdhci_plat),
};
| guileschool/BEAGLEBONE-tutorials | BBB-firmware/u-boot-v2018.05-rc2/drivers/mmc/s5p_sdhci.c | C | mit | 6,778 |
/*
* The MIT License (MIT): http://opensource.org/licenses/mit-license.php
*
* Copyright (c) 2013-2014, Chris Behrens
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#define __FIL_BUILDING_LOCKING__
#include "core/filament.h"
#include "locking/fil_lock.h"
/* Non-recursive filament lock: a flag plus a FIFO list of waiters. */
typedef struct _pyfil_lock {
    PyObject_HEAD
    int locked;             /* 1 while held (or reserved for a woken waiter) */
    FilWaiterList waiters;  /* threads blocked in acquire(), FIFO order */
} PyFilLock;

/* Recursive lock: embeds PyFilLock so the two share code. */
typedef struct _pyfil_rlock {
    PyFilLock lock; /* must remain first. */
    uint64_t owner;  /* ident of the holding thread, 0 when free */
    uint64_t count;  /* recursion depth for the owner */
} PyFilRLock;
/* tp_new for both lock types: allocate and start with an empty waiter list. */
static PyFilLock *_lock_new(PyTypeObject *type, PyObject *args, PyObject *kw)
{
    PyFilLock *self;

    self = (PyFilLock *)type->tp_alloc(type, 0);
    if (self == NULL)
    {
        return NULL;
    }
    fil_waiterlist_init(self->waiters);
    return self;
}
/* tp_init: nothing to do — all state is set up in tp_new. */
static int _lock_init(PyFilLock *self, PyObject *args, PyObject *kwargs)
{
    return 0;
}

/* tp_dealloc: a lock must never be destroyed while threads wait on it. */
static void _lock_dealloc(PyFilLock *self)
{
    assert(fil_waiterlist_empty(self->waiters));
    PyObject_Del(self);
}
/* Core acquire for the plain lock.
 * Returns 0 on success, 1 when non-blocking and the lock is busy,
 * or a negative errno (e.g. -ETIMEDOUT) from the waiter list.
 * A free lock is only taken directly when no one is queued, which keeps
 * hand-off FIFO-fair. */
static int __lock_acquire(PyFilLock *lock, int blocking, struct timespec *ts)
{
    if (!lock->locked && fil_waiterlist_empty(lock->waiters))
    {
        lock->locked = 1;
        return 0;
    }
    if (!blocking)
    {
        return 1;
    }
    int err = fil_waiterlist_wait(lock->waiters, ts, NULL);
    if (err < 0)
    {
        return err;
    }
    /* the releaser left 'locked' set on our behalf (see __lock_release) */
    assert(lock->locked == 1);
    return 0;
}
/* Core release for the plain lock.
 * Returns 0, or -1 with RuntimeError set when the lock is not held. */
static int __lock_release(PyFilLock *lock)
{
    if (!lock->locked)
    {
        PyErr_SetString(PyExc_RuntimeError, "release without acquire");
        return -1;
    }
    if (fil_waiterlist_empty(lock->waiters))
    {
        lock->locked = 0;
        return 0;
    }
    /* leave 'locked' set because a different thread is just
     * going to grab it anyway. This prevents some races without
     * additional work to resolve them.
     */
    fil_waiterlist_signal_first(lock->waiters);
    return 0;
}
/* Core acquire for the recursive lock.
 * Same return contract as __lock_acquire; in addition, re-acquisition by
 * the owning thread just bumps the recursion count. */
static int __rlock_acquire(PyFilRLock *lock, int blocking, struct timespec *ts)
{
    uint64_t owner;

    owner = fil_get_ident();
    if (!lock->lock.locked && fil_waiterlist_empty(lock->lock.waiters))
    {
        lock->lock.locked = 1;
        lock->owner = owner;
        lock->count = 1;
        return 0;
    }
    if (owner == lock->owner)
    {
        /* recursive acquire by the current holder */
        lock->count++;
        return 0;
    }
    if (!blocking)
    {
        return 1;
    }
    int err = fil_waiterlist_wait(lock->lock.waiters, ts, NULL);
    if (err)
    {
        return err;
    }
    /* woken by the releaser, which left 'locked' set for us */
    assert(lock->lock.locked == 1);
    lock->owner = owner;
    lock->count = 1;
    return 0;
}
/* Core release for the recursive lock.
 * Only the owning thread may release; the lock is handed off (or freed)
 * once the recursion count drops to zero.
 * Returns 0, or -1 with RuntimeError set. */
static int __rlock_release(PyFilRLock *lock)
{
    if (!lock->lock.locked || (fil_get_ident() != lock->owner))
    {
        PyErr_SetString(PyExc_RuntimeError, "cannot release un-acquired lock");
        return -1;
    }
    if (--lock->count > 0)
    {
        return 0;
    }
    lock->owner = 0;
    if (fil_waiterlist_empty(lock->lock.waiters))
    {
        lock->lock.locked = 0;
        return 0;
    }
    /* leave 'locked' set because a different thread is just
     * going to grab it anyway. This prevents some races without
     * additional work to resolve them.
     */
    fil_waiterlist_signal_first(lock->lock.waiters);
    return 0;
}
PyDoc_STRVAR(_lock_acquire_doc, "Acquire the lock.");
/* Python-level Lock.acquire(blocking=True, timeout=None).
 * Returns True on success, False on non-blocking failure or timeout;
 * NULL on error. */
static PyObject *_lock_acquire(PyFilLock *self, PyObject *args, PyObject *kwargs)
{
    static char *keywords[] = {"blocking", "timeout", NULL};
    PyObject *blockingobj = NULL;
    PyObject *timeout = NULL;
    struct timespec tsbuf;
    struct timespec *ts;
    int blocking;
    int err;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!O",
                                     keywords,
                                     &PyBool_Type,
                                     &blockingobj, &timeout))
    {
        return NULL;
    }
    /* converts the timeout to an absolute timespec; ts == NULL means forever */
    if (fil_timespec_from_pyobj_interval(timeout, &tsbuf, &ts) < 0)
    {
        return NULL;
    }
    blocking = (blockingobj == NULL || blockingobj == Py_True);
    err = __lock_acquire(self, blocking, ts);
    if (err < 0 && err != -ETIMEDOUT)
    {
        return NULL;
    }
    if (err == 0)
    {
        Py_INCREF(Py_True);
        return Py_True;
    }
    Py_INCREF(Py_False);
    return Py_False;
}
PyDoc_STRVAR(_lock_locked_doc, "Is the lock locked?");
/* True when the lock is held or anyone is queued waiting for it. */
static PyObject *_lock_locked(PyFilLock *self)
{
    PyObject *res;

    if (self->locked || !fil_waiterlist_empty(self->waiters))
        res = Py_True;
    else
        res = Py_False;
    Py_INCREF(res);
    return res;
}
PyDoc_STRVAR(_lock_release_doc, "Release the lock.");
/* Python-level Lock.release(); raises RuntimeError if not held. */
static PyObject *_lock_release(PyFilLock *self)
{
    if (__lock_release(self) < 0)
    {
        return NULL;
    }
    Py_RETURN_NONE;
}
/* Context-manager protocol: __enter__ blocks until the lock is held and
 * returns the lock itself. */
static PyObject *_lock_enter(PyFilLock *self)
{
    int err = __lock_acquire(self, 1, NULL);
    if (err)
    {
        if (!PyErr_Occurred())
        {
            PyErr_Format(PyExc_RuntimeError, "unexpected failure in Lock.__enter__: %d", err);
        }
        return NULL;
    }
    Py_INCREF(self);
    return (PyObject *)self;
}

/* __exit__ always releases, regardless of the exception triple in args. */
static PyObject *_lock_exit(PyFilLock *self, PyObject *args)
{
    return _lock_release(self);
}
PyDoc_STRVAR(_rlock_acquire_doc, "Acquire the lock.");
/* Python-level RLock.acquire(blocking=True, timeout=None).
 * Same contract as Lock.acquire, but re-entrant for the owning thread. */
static PyObject *_rlock_acquire(PyFilRLock *self, PyObject *args, PyObject *kwargs)
{
    static char *keywords[] = {"blocking", "timeout", NULL};
    PyObject *blockingobj = NULL;
    PyObject *timeout = NULL;
    struct timespec tsbuf;
    struct timespec *ts;
    int blocking;
    int err;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!O",
                                     keywords,
                                     &PyBool_Type,
                                     &blockingobj, &timeout))
    {
        return NULL;
    }
    if (fil_timespec_from_pyobj_interval(timeout, &tsbuf, &ts) < 0)
    {
        return NULL;
    }
    blocking = (blockingobj == NULL || blockingobj == Py_True);
    err = __rlock_acquire(self, blocking, ts);
    if (err < 0 && err != -ETIMEDOUT)
    {
        return NULL;
    }
    if (err == 0)
    {
        Py_INCREF(Py_True);
        return Py_True;
    }
    Py_INCREF(Py_False);
    return Py_False;
}
PyDoc_STRVAR(_rlock_locked_doc, "Is the lock locked (by someone else)?");
/* True when another thread holds the lock or waiters are queued; a lock
 * held by the calling thread itself reports False. */
static PyObject *_rlock_locked(PyFilRLock *self)
{
    uint64_t owner = fil_get_ident();
    PyObject *res = ((self->lock.locked && self->owner != owner) ||
                     !fil_waiterlist_empty(self->lock.waiters)) ? Py_True : Py_False;
    Py_INCREF(res);
    return res;
}
PyDoc_STRVAR(_rlock_release_doc, "Release the lock.");
/* Python-level RLock.release(); raises RuntimeError when the caller does
 * not own the lock. */
static PyObject *_rlock_release(PyFilRLock *self)
{
    if (__rlock_release(self) < 0)
    {
        return NULL;
    }
    Py_RETURN_NONE;
}
/* Context-manager protocol for RLock: __enter__ blocks until acquired. */
static PyObject *_rlock_enter(PyFilRLock *self)
{
    int err = __rlock_acquire(self, 1, NULL);
    if (err)
    {
        if (!PyErr_Occurred())
        {
            PyErr_Format(PyExc_RuntimeError, "unexpected failure in RLock.__enter__: %d", err);
        }
        return NULL;
    }
    Py_INCREF(self);
    return (PyObject *)self;
}

/* __exit__ always releases one recursion level. */
static PyObject *_rlock_exit(PyFilRLock *self, PyObject *args)
{
    return _rlock_release(self);
}
/* Method tables for the two Python types; __enter__/__exit__ make both
 * usable with the 'with' statement. */
static PyMethodDef _lock_methods[] = {
    { "acquire", (PyCFunction)_lock_acquire, METH_VARARGS|METH_KEYWORDS, _lock_acquire_doc },
    { "release", (PyCFunction)_lock_release, METH_NOARGS, _lock_release_doc },
    { "locked", (PyCFunction)_lock_locked, METH_NOARGS, _lock_locked_doc },
    { "__enter__", (PyCFunction)_lock_enter, METH_NOARGS, NULL },
    { "__exit__", (PyCFunction)_lock_exit, METH_VARARGS, NULL },
    { NULL, NULL }
};

static PyMethodDef _rlock_methods[] = {
    { "acquire", (PyCFunction)_rlock_acquire, METH_VARARGS|METH_KEYWORDS, _rlock_acquire_doc },
    { "release", (PyCFunction)_rlock_release, METH_NOARGS, _rlock_release_doc },
    { "locked", (PyCFunction)_rlock_locked, METH_NOARGS, _rlock_locked_doc },
    { "__enter__", (PyCFunction)_rlock_enter, METH_NOARGS, NULL },
    { "__exit__", (PyCFunction)_rlock_exit, METH_VARARGS, NULL },
    { NULL, NULL }
};
/* Python type object for locking.Lock; all unlisted slots default to 0. */
static PyTypeObject _lock_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "_filament.locking.Lock",                   /* tp_name */
    sizeof(PyFilLock),                          /* tp_basicsize */
    0,                                          /* tp_itemsize */
    (destructor)_lock_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_compare */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    FIL_DEFAULT_TPFLAGS,                        /* tp_flags */
    0,                                          /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    _lock_methods,                              /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    (initproc)_lock_init,                       /* tp_init */
    PyType_GenericAlloc,                        /* tp_alloc */
    (newfunc)_lock_new,                         /* tp_new */
    PyObject_Del,                               /* tp_free */
    0,                                          /* tp_is_gc */
    0,                                          /* tp_bases */
    0,                                          /* tp_mro */
    0,                                          /* tp_cache */
    0,                                          /* tp_subclasses */
    0,                                          /* tp_weaklist */
    0,                                          /* tp_del */
    0,                                          /* tp_version_tag */
};
/* Re-entrant lock. We can use the same calls here: tp_new/tp_init/tp_dealloc
 * are shared with Lock because PyFilRLock embeds PyFilLock as its first
 * member; only the method table and the instance size differ. */
static PyTypeObject _rlock_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "_filament.locking.RLock",                  /* tp_name */
    sizeof(PyFilRLock),                         /* tp_basicsize */
    0,                                          /* tp_itemsize */
    (destructor)_lock_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_compare */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    FIL_DEFAULT_TPFLAGS,                        /* tp_flags */
    0,                                          /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    _rlock_methods,                             /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    (initproc)_lock_init,                       /* tp_init */
    PyType_GenericAlloc,                        /* tp_alloc */
    (newfunc)_lock_new,                         /* tp_new */
    PyObject_Del,                               /* tp_free */
    0,                                          /* tp_is_gc */
    0,                                          /* tp_bases */
    0,                                          /* tp_mro */
    0,                                          /* tp_cache */
    0,                                          /* tp_subclasses */
    0,                                          /* tp_weaklist */
    0,                                          /* tp_del */
    0,                                          /* tp_version_tag */
};
/****************/
/* C-level API for other filament modules: constructors and acquire/release
 * wrappers around the internal __lock_* / __rlock_* primitives. */
PyFilLock *fil_lock_alloc(void)
{
    return _lock_new(&_lock_type, NULL, NULL);
}

PyFilRLock *fil_rlock_alloc(void)
{
    return (PyFilRLock *)_lock_new(&_rlock_type, NULL, NULL);
}

int fil_lock_acquire(PyFilLock *lock, int blocking, struct timespec *ts)
{
    return __lock_acquire(lock, blocking, ts);
}

int fil_rlock_acquire(PyFilRLock *rlock, int blocking, struct timespec *ts)
{
    return __rlock_acquire(rlock, blocking, ts);
}

int fil_lock_release(PyFilLock *lock)
{
    return __lock_release(lock);
}

int fil_rlock_release(PyFilRLock *rlock)
{
    return __rlock_release(rlock);
}
/* Module init helper: ready both types and expose them as module
 * attributes "Lock" and "RLock".  Returns 0 or -1 with an exception set. */
int fil_lock_type_init(PyObject *module)
{
    PyFilCore_Import();

    if (PyType_Ready(&_lock_type) < 0)
    {
        return -1;
    }
    if (PyType_Ready(&_rlock_type) < 0)
    {
        return -1;
    }
    /* PyModule_AddObject steals the reference on success only */
    Py_INCREF((PyObject *)&_lock_type);
    if (PyModule_AddObject(module, "Lock", (PyObject *)&_lock_type) != 0)
    {
        Py_DECREF((PyObject *)&_lock_type);
        return -1;
    }
    Py_INCREF((PyObject *)&_rlock_type);
    if (PyModule_AddObject(module, "RLock", (PyObject *)&_rlock_type) != 0)
    {
        Py_DECREF((PyObject *)&_rlock_type);
        return -1;
    }
    return 0;
}
| comstud/filament | src/locking/fil_lock.c | C | mit | 16,390 |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "liste.h"
#include "atl.h"
#include "es.h"
#define FILE_ATLETI "atleti.txt"
#define FILE_ESERCIZI "esercizi.txt"
#define MAX_NOME 25
#define LUNG_CODICE 5
#define non_strutturato ;;
#ifdef _WIN32
#define F_CLEAR "cls"
#else
#define F_CLEAR "clear"
#endif
/* forward declarations for the interactive helpers defined below */
Atleta inputCercaAtleta(Lista);   /* prompt + lookup of one athlete */
void makeDotTxt(char*, char*);    /* build "<code>.txt" file name */
FILE *inputStampaSuFile();        /* choose stdout or a user-named file */
/* Entry point: load the exercise table and the athlete archive from their
 * fixed files, then run an interactive menu loop until option 0 is chosen.
 * NOTE(review): every scanf("%s", ...) below is unbounded — input longer
 * than the destination buffer overflows it; confirm and add field widths. */
int main() {
  FILE *fp, *fEs;
  Atleta tmpAtl=newAtleta();
  Lista atleti=newAtlCollection();
  tabellaEs esercizi=newEsCollection();
  char uInput[100], fileTxt[10];
  char codice[LUNG_CODICE+1], nome[MAX_NOME+1], cognome[MAX_NOME+1];
  char categoria[MAX_NOME+1], data[11];
  int scelta=-1;
  int x,y;
  // EXERCISES file
  if ((fp=fopen(FILE_ESERCIZI, "r"))==NULL){
    printf("Errore! Impossibile aprire il file \"%s\"!\n", FILE_ESERCIZI);
    exit(1);
  }
  caricaEsercizi(esercizi, fp);
  fclose(fp);
  // -------------------------------------------------------------------------
  // ATHLETES file (fp is reused)
  if ((fp=fopen(FILE_ATLETI, "r"))==NULL){
    printf("Errore! Impossibile aprire il file \"%s\"!\n", FILE_ATLETI);
    exit(1);
  }
  caricaAtleti(atleti, fp);
  fclose(fp);
  // menu loop
  for(non_strutturato) {
    system(F_CLEAR);
    puts("01. Stampa contenuto anagrafica");
    puts("02. Stampa gli atleti divisi per categoria");
    puts("03. Aggiornamento monte ore settimanali");
    puts("04. Ricerca atleta per codice o cognome parziale");
    puts("05. Aggiungi un atleta");
    puts("06. Cancella un atleta");
    for (x=80; x-->0; printf("-")); puts(""); // horizontal rule
    puts("07. Caricare / salvare esercizi di un atleta");
    puts("08. Modificare set / ripetizioni di un esercizio di un atleta");
    puts("09. Aggiungi un esercizio");
    puts("10. Cancella un esercizio");
    for (x=80; x-->0; printf("-")); puts("");
    puts("0. Esci");
    puts("");
    printf("> ");
    scanf("%d", &scelta);
    switch (scelta) {
      case 0:
        return 0;
      case 1: // print the whole register
        fp=inputStampaSuFile();
        stampaAnagrafica(atleti, fp);
        if (fp!=stdout) fclose(fp);
        break;
      case 2: // print athletes grouped by category
        stampaPerCategoria(atleti);
        break;
      case 3: // update the weekly hours
        if ((tmpAtl=inputCercaAtleta(atleti))==NULL) break;
        printf("Monte ore attuali: %d\n", getOreAtleta(tmpAtl));
        printf("Nuovo monte ore: ");
        scanf("%d", &x);
        modificaOreAtl(tmpAtl, x);
        puts("Monte ore aggiornato correttamente!");
        break;
      case 4: // search for an athlete
        inputCercaAtleta(atleti);
        break;
      case 5: // add an athlete
        printf("Codice: ");
        scanf("%s", codice);
        printf("Nome: ");
        scanf("%s", nome);
        printf("Cognome: ");
        scanf("%s", cognome);
        printf("Cateogria: ");
        scanf("%s", categoria);
        printf("Data : ");
        scanf("%s", data);
        printf("Monte ore: ");
        scanf("%d", &x);
        aggiungiAtletaByPar(atleti, codice, nome, cognome, categoria, data, x);
        puts("Atleta aggiunto correttamente!");
        break;
      case 6: // delete an athlete
        if ((tmpAtl=inputCercaAtleta(atleti))==NULL) break;
        printf("Rimuovere l'atleta trovato? [s/n] ");
        scanf("%s", uInput);
        if (tolower(uInput[0])=='s') {
          cancellaAtleta(atleti, tmpAtl);
          puts("Atleta cancellato con successo!");
        }
        break;
      case 7:
        // load / print the exercise plan of one athlete
        if ((tmpAtl=inputCercaAtleta(atleti))==NULL) break;
        if (eserciziCaricatiAtl(tmpAtl)) {
          // exercises already loaded: just print them
          fp=inputStampaSuFile();
          stampaTuttiEs(getListaEsercizi(tmpAtl), fp);
          break;
        }
        //else: try to load the exercise plan for the athlete
        makeDotTxt(fileTxt, getCodiceAtleta(tmpAtl));
        if ((fEs=fopen(fileTxt, "r"))!=NULL) {
          // a file named after the athlete's code exists...
          caricaPianoEsercizi(getListaEsercizi(tmpAtl), esercizi, fEs);
          puts("Piano degli esercizi caricato correttamente");
          fclose(fEs);
        } else {
          printf("Non ho trovato un piano esercizi per %s\n",
                 getCodiceAtleta(tmpAtl));
        }
        break;
      case 8:
        // edit the number of sets/repetitions
        if ((tmpAtl=inputCercaAtleta(atleti))==NULL) break;
        if (!eserciziCaricatiAtl(tmpAtl)){
          printf("Esercizi non caricati per \"%s\"", getCodiceAtleta(tmpAtl));
          break;
        }
        // exercises are loaded: ask which one to edit
        printf("Nome dell'esercizio per modificare set/ripetizioni: ");
        scanf("%s", uInput);
        printf("Nuovo n* set: "); scanf("%d", &x);
        printf("Nuovo n* ripetizioni: "); scanf("%d", &y);
        if(modificaPianoEsByName(getListaEsercizi(tmpAtl), uInput, x, y)){
          puts("Modifiche effettuate con successo!");
        } else {
          puts("Errore! Esercizio non trovato.");
        }
        break;
      case 9:
        // add an exercise to the athlete's plan:
        // needs the sets/repetitions for the list entry plus the
        // exercise name used to locate the shared exercise record
        if ((tmpAtl=inputCercaAtleta(atleti))==NULL) break;
        printf("Nome dell'esercizio da aggiungere: ");
        scanf("%s", uInput);
        printf("Nuovo n* set: "); scanf("%d", &x);
        printf("Nuovo n* ripetizioni: "); scanf("%d", &y);
        if(aggiungiEs(getListaEsercizi(tmpAtl), esercizi, uInput, x, y)) {
          puts("Esercizio aggiunto con successo!");
        } else {
          printf("Impossibile trovare l'esercizio \"%s\"!\n", uInput);
        }
        break;
      case 10:
        // delete an exercise from the athlete's plan
        if ((tmpAtl=inputCercaAtleta(atleti))==NULL) break;
        if (!eserciziCaricatiAtl(tmpAtl)){
          printf("Esercizi non caricati per \"%s\"", getCodiceAtleta(tmpAtl));
          break;
        }
        // exercises are loaded: ask which one to remove
        printf("Nome dell'esercizio da cancellare: ");
        scanf("%s", uInput);
        if (cancellaPianoEsByName(getListaEsercizi(tmpAtl), uInput))
          puts("Esercizio cancellato con successo!");
        else
          puts("Errore! Esercizio non trovato!");
        break;
      default:
        puts("Comando non trovato.");
    }
    getc(stdin); // consume the newline left by scanf
    printf("\nPremere invio per tornare al menu'... ");
    getc(stdin); // wait for the user to press enter
  }
  return 0;
}
/* Prompts for a code or partial surname, looks the athlete up in the list,
 * prints it on success and returns it; returns NULL when nothing matches. */
Atleta inputCercaAtleta(Lista l) {
    char query[MAX_NOME+1];
    Atleta found;
    printf("Codice o cognome parziale dell'atleta: ");
    scanf("%s", query);
    found = cercaAtleta(l, query);
    if (found == NULL) {
        puts("Atleta non trovato");
        return NULL;
    }
    stampaAtleta(found, stdout);
    return found;
}
/*
 * Asks the user whether output should be written to a file.
 * Returns the opened stream (mode "w") or stdout as a fallback; the caller
 * owns the stream and must fclose() it only when it is not stdout.
 */
FILE *inputStampaSuFile() {
    FILE *fp;
    char c[3], f[100];
    printf("Stampa su file? [s/n] ");
    scanf("%2s", c);   /* fix: "%s" could overflow the 3-byte buffer */
    if (tolower(c[0])=='s') {
        printf("Inserisci il nome del file: ");
        scanf("%99s", f);   /* fix: bound the read to the 100-byte buffer */
        if ((fp=fopen(f, "w"))==NULL) {
            /* fix: missing newline glued this message to the next one */
            printf("Errore! Impossibile aprire il file \"%s\"\n", f);
            printf("Stampo a video...\n");
            return stdout;
        } else {
            return fp;
        }
    } else {
        return stdout;
    }
}
/* Builds the filename "<src>.txt" into dst.
 * dst must provide room for strlen(src) + 4 characters plus the terminator. */
void makeDotTxt(char *dst, char *src) {
    sprintf(dst, "%s.txt", src);
}
| supermirtillo/polito-c-apa | L08/E04/main.c | C | mit | 8,509 |
//
// console777.c
// crypto777
//
// Created by James on 4/9/15.
// Copyright (c) 2015 jl777. All rights reserved.
//
#ifdef DEFINES_ONLY
#ifndef crypto777_console777_h
#define crypto777_console777_h
#include <stdio.h>
#include <stdio.h>
#include <ctype.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include "../includes/cJSON.h"
#include "../KV/kv777.c"
#include "../common/system777.c"
#endif
#else
#ifndef crypto777_console777_c
#define crypto777_console777_c
#ifndef crypto777_console777_h
#define DEFINES_ONLY
#include "console777.c"
#undef DEFINES_ONLY
#endif
/*
 * Polls stdin for one line of console input and copies it into line
 * (capacity max).  On POSIX the wait is a 10ms select() so the caller can
 * spin; an empty line or "." repeats the previous command.
 * Returns the length of the line stored in line (0 when nothing arrived).
 * NOTE(review): prevline is 1024 bytes — assumes callers pass max <= 1024;
 * confirm, otherwise strcpy(prevline,line) can overflow.
 */
int32_t getline777(char *line,int32_t max)
{
#ifndef _WIN32
    static char prevline[1024];
    struct timeval timeout;
    fd_set fdset;
    int32_t s,len;
    line[0] = 0;
    FD_ZERO(&fdset);
    FD_SET(STDIN_FILENO,&fdset);
    timeout.tv_sec = 0, timeout.tv_usec = 10000;
    if ( (s= select(STDIN_FILENO+1,&fdset,NULL,NULL,&timeout)) < 0 )
        fprintf(stderr,"wait_for_input: error select s.%d\n",s);
    else
    {
        if ( FD_ISSET(STDIN_FILENO,&fdset) > 0 && fgets(line,max,stdin) == line )
        {
            len = (int32_t)strlen(line);
            if ( len > 0 && line[len-1] == '\n' ) // fix: only strip an actual newline, not the last real char
                line[len-1] = 0;
            if ( line[0] == 0 || (line[0] == '.' && line[1] == 0) )
                strcpy(line,prevline); // empty or "." repeats the previous command
            else strcpy(prevline,line);
        }
    }
    return((int32_t)strlen(line));
#else
    size_t len;
    line[0] = 0;
    if ( fgets(line,max,stdin) != NULL ) // fix: fgets result was unchecked
    {
        len = strlen(line);
        if ( len > 0 && line[len-1] == '\n' ) // fix: line[strlen-1] underflowed on an empty read
            line[len-1] = 0;
    }
    return((int32_t)strlen(line));
#endif
}
/*
 * Copies the first whitespace-delimited token of line (at most 32 chars)
 * into token and NUL-terminates it.  token must hold at least 33 bytes.
 * Returns the number of characters copied.
 */
int32_t settoken(char *token,char *line)
{
    int32_t n = 0;
    while ( n < 32 && line[n] != 0 )
    {
        char ch = line[n];
        if ( ch == ' ' || ch == '\n' || ch == '\t' || ch == '\b' || ch == '\r' )
            break;
        token[n] = ch;
        n++;
    }
    token[n] = 0;
    return(n);
}
/*
 * Parses "alias <name> <value>" (line points just past "alias ") and stores
 * the key "#<name>" -> value in the SUPERNET.alias kv store, reporting on
 * stdout whether the entry was created, updated or unchanged.
 * NOTE(review): settoken() never returns a negative value, so the < 0
 * guard below is effectively dead code.
 */
void update_alias(char *line)
{
    char retbuf[8192],alias[1024],*value; int32_t i,err;
    if ( (i= settoken(&alias[1],line)) < 0 )
        return;
    if ( line[i] == 0 )
        value = &line[i];       // no value given -> empty string
    else value = &line[i+1];    // value starts after the separator char
    line[i] = 0;                // terminate the name in place
    alias[0] = '#';             // stored keys carry a '#' prefix
    printf("i.%d alias.(%s) value.(%s)\n",i,alias,value);
    if ( value[0] == 0 )
        printf("warning value for %s is null\n",alias);
    // fetch the existing value (if any) so we can report CREATE/UPDATE/UNCHANGED
    kv777_findstr(retbuf,sizeof(retbuf),SUPERNET.alias,alias);
    if ( strcmp(retbuf,value) == 0 )
        printf("UNCHANGED ");
    else printf("%s ",retbuf[0] == 0 ? "CREATE" : "UPDATE");
    printf(" (%s) -> (%s)\n",alias,value);
    if ( (err= kv777_addstr(SUPERNET.alias,alias,value)) != 0 )
        printf("error.%d updating alias database\n",err);
}
/*
 * Repeatedly replaces every "#name" token in line with its value from the
 * SUPERNET.alias kv store, ping-ponging between the two caller-supplied
 * buffers (each of capacity max) until a pass makes no substitution.
 * Returns a pointer to the buffer holding the final text — which may be
 * either _expanded or _expanded2, so callers must use the return value.
 * NOTE(review): an unknown "#name" is silently dropped from the output;
 * recursive aliases terminate only via the len < max-8192 guard — confirm
 * both behaviors are intended.
 */
char *expand_aliases(char *_expanded,char *_expanded2,int32_t max,char *line)
{
    char alias[64],value[8192],*expanded,*otherbuf;
    int32_t i,j,k,len=0,flag = 1;
    expanded = _expanded, otherbuf = _expanded2;
    // flag counts substitutions made in the current pass; stop when a pass is clean
    while ( len < max-8192 && flag != 0 )
    {
        flag = 0;
        len = (int32_t)strlen(line);
        for (i=j=0; i<len; i++)
        {
            if ( line[i] == '#' )
            {
                if ( (k= settoken(&alias[1],&line[i+1])) <= 0 )
                    continue;
                i += k;          // skip the alias name in the input
                alias[0] = '#';
                if ( kv777_findstr(value,sizeof(value),SUPERNET.alias,alias) != 0 )
                {
                    if ( value[0] != 0 )
                        for (k=0; value[k]!=0; k++)
                            expanded[j++] = value[k];   // splice in the value
                    expanded[j] = 0;
                    //printf("found (%s) -> (%s) [%s]\n",alias,value,expanded);
                    flag++;
                }
            } else expanded[j++] = line[i];
        }
        expanded[j] = 0;
        line = expanded;   // next pass reads what we just wrote
        // swap the two scratch buffers for the next pass
        if ( expanded == _expanded2 )
            expanded = _expanded, otherbuf = _expanded2;
        else expanded = _expanded2, otherbuf = _expanded;
    }
    //printf("(%s) -> (%s) len.%d flag.%d\n",line,expanded,len,flag);
    return(line);
}
/*
 * Handles commands that are resolved locally without touching the network:
 * "list" (print relay list), "alias ..." (update the alias store) and
 * "help".  Returns NULL when the command was fully handled here, otherwise
 * returns line unchanged so the caller continues processing it.
 */
char *localcommand(char *line)
{
    char *retstr;
    if ( strcmp(line,"list") == 0 )
    {
        if ( (retstr= relays_jsonstr(0,0)) != 0 )
        {
            printf("%s\n",retstr);
            free(retstr);   // relays_jsonstr allocates; we own the string
        }
        return(0);
    }
    else if ( strncmp(line,"alias",5) == 0 )
    {
        update_alias(line+6);   // skip "alias " prefix
        return(0);
    }
    else if ( strcmp(line,"help") == 0 )
    {
        printf("local commands:\nhelp, list, alias <name> <any string> then #name is expanded to <any string>\n");
        printf("alias expansions are iterated, so be careful with recursive macros!\n\n");
        printf("<plugin name> <method> {json args} -> invokes plugin with method and args, \"myipaddr\" and \"NXT\" are default attached\n\n");
        printf("network commands: default timeout is used if not specified\n");
        printf("relay <plugin name> <method> {json args} -> will send to random relay\n");
        printf("peers <plugin name> <method> {json args} -> will send all peers\n");
        printf("!<plugin name> <method> {json args} -> sends to random relay which will send to all its peers and combine results.\n\n");
        printf("publish shortcut: pub <any string> -> invokes the subscriptions plugin with publish method and all subscribers will be sent <any string>\n\n");
        printf("direct to specific relay needs to have a direct connection established first:\nrelay direct or peers direct <ipaddr>\n");
        printf("in case you cant directly reach a specific relay with \"peers direct <ipaddr>\" you can add \"!\" and let a relay broadcast\n");
        printf("without an <ipaddr> it will connect to a random relay. Once directly connected, commands are sent by:\n");
        printf("<ipaddress> {\"plugin\":\"<name>\",\"method\":\"<methodname>\",...}\n");
        printf("responses to direct requests are sent through as a subscription feed\n\n");
        printf("\"relay join\" adds your node to the list of relay nodes, your node will need to stay in sync with the other relays\n");
        //printf("\"relay mailbox <64bit number> <name>\" creates synchronized storage in all relays\n");
        return(0);
    }
    return(line);
}
/*
 * Splits an expanded console line into plugin name, method and a JSON
 * argument object.  plugin and method are 512-byte caller buffers;
 * *timeoutp receives the "timeout" field (0 when absent).  A "tag",
 * "plugin", "method" and optionally "broadcast" field are injected into
 * the JSON.  Returns a malloc'd, whitespace-stripped JSON string the
 * caller must free (or a clone of the publish payload when no JSON object
 * could be created).
 * NOTE(review): when line[i] == 0 the line[i+1] test below reads one byte
 * past the terminator; harmless for the 64KB console buffers but worth
 * confirming for other callers.
 */
char *parse_expandedline(char *plugin,char *method,int32_t *timeoutp,char *line,int32_t broadcastflag)
{
    int32_t i,j; char numstr[64],*pubstr,*cmdstr = 0; cJSON *json; uint64_t tag;
    for (i=0; i<511&&line[i]!=' '&&line[i]!=0; i++) // fix: was i<512, allowing plugin[512] to be written below
        plugin[i] = line[i];
    plugin[i] = 0;
    *timeoutp = 0;
    pubstr = line;
    if ( strcmp(plugin,"pub") == 0 )
        strcpy(plugin,"subscriptions"), strcpy(method,"publish"), pubstr += 4;
    else if ( line[i+1] != 0 )
    {
        for (++i,j=0; i<512&&line[i]!=' '&&line[i]!=0; i++,j++)
            method[j] = line[i];
        method[j] = 0;
    } else method[0] = 0;
    if ( (json= cJSON_Parse(line+i+1)) == 0 )
        json = cJSON_CreateObject(); // no args supplied -> empty object
    if ( json != 0 )
    {
        if ( strcmp("direct",method) == 0 && cJSON_GetObjectItem(json,"myipaddr") == 0 )
            cJSON_AddItemToObject(json,"myipaddr",cJSON_CreateString(SUPERNET.myipaddr));
        if ( cJSON_GetObjectItem(json,"tag") == 0 ) // every request carries a random correlation tag
            randombytes((void *)&tag,sizeof(tag)), sprintf(numstr,"%llu",(long long)tag), cJSON_AddItemToObject(json,"tag",cJSON_CreateString(numstr));
        //if ( cJSON_GetObjectItem(json,"NXT") == 0 )
        //    cJSON_AddItemToObject(json,"NXT",cJSON_CreateString(SUPERNET.NXTADDR));
        *timeoutp = get_API_int(cJSON_GetObjectItem(json,"timeout"),0);
        if ( plugin[0] == 0 )
            strcpy(plugin,"relay"); // default plugin
        if ( cJSON_GetObjectItem(json,"plugin") == 0 )
            cJSON_AddItemToObject(json,"plugin",cJSON_CreateString(plugin));
        else copy_cJSON(plugin,cJSON_GetObjectItem(json,"plugin")); // explicit field wins
        if ( method[0] == 0 )
            strcpy(method,"help"); // default method
        cJSON_AddItemToObject(json,"method",cJSON_CreateString(method));
        if ( broadcastflag != 0 )
            cJSON_AddItemToObject(json,"broadcast",cJSON_CreateString("allrelays"));
        cmdstr = cJSON_Print(json), _stripwhite(cmdstr,' ');
        return(cmdstr);
    }
    else return(clonestr(pubstr)); // unreachable in practice: CreateObject rarely fails
}
/*
 * Routes a fully-formed JSON command string to its destination: busdata
 * broadcast for relay/broadcast requests, or a locally attached plugin
 * daemon looked up by name.  Returns a malloc'd JSON response string the
 * caller owns (always non-NULL; errors are reported as JSON).
 */
char *process_user_json(char *plugin,char *method,char *cmdstr,int32_t broadcastflag,int32_t timeout)
{
    struct daemon_info *find_daemoninfo(int32_t *indp,char *name,uint64_t daemonid,uint64_t instanceid);
    uint32_t nonce; int32_t tmp,len; char *retstr;//,tokenized[8192];
    len = (int32_t)strlen(cmdstr) + 1; // include the NUL in the payload length
    //printf("userjson.(%s).%d plugin.(%s) broadcastflag.%d method.(%s)\n",cmdstr,len,plugin,broadcastflag,method);
    if ( broadcastflag != 0 || strcmp(plugin,"relay") == 0 )
    {
        if ( strcmp(method,"busdata") == 0 )
            retstr = busdata_sync(&nonce,cmdstr,broadcastflag==0?0:"allnodes",0);
        else retstr = clonestr("{\"error\":\"direct load balanced calls deprecated, use busdata\"}");
    }
    //else if ( strcmp(plugin,"peers") == 0 )
    //    retstr = nn_allrelays((uint8_t *)cmdstr,len,timeout,0);
    else if ( find_daemoninfo(&tmp,plugin,0,0) != 0 ) // plugin daemon is running locally
    {
        //len = construct_tokenized_req(tokenized,cmdstr,SUPERNET.NXTACCTSECRET,broadcastflag!=0?"allnodes":0);
        //printf("console.(%s)\n",tokenized);
        retstr = plugin_method(-1,0,1,plugin,method,0,0,cmdstr,len,timeout != 0 ? timeout : 0,0);
    }
    else retstr = clonestr("{\"error\":\"invalid command\"}");
    return(retstr);
}
/*
 * Dispatches one line of console input: expands #aliases, handles local
 * commands, then either forwards JSON straight to SuperNET_JSON, handles a
 * (deprecated) direct-ip command, or parses "<plugin> <method> {json}" and
 * invokes the plugin.
 * fix: the old code advanced the static buffer pointers themselves
 * ("line++", "line += ..."), permanently shifting and shrinking the
 * reusable 64KB buffer across calls and leaking the original allocation;
 * a local cursor is used instead.  It also ignored expand_aliases()'s
 * return value, although the final text may live in the second buffer.
 */
void process_userinput(char *_line)
{
    static char *line,*line2;
    char plugin[512],ipaddr[1024],method[512],*cmdstr,*retstr,*cur; cJSON *json; int timeout,broadcastflag = 0;
    printf("[%s]\n",_line);
    if ( line == 0 )
        line = calloc(1,65536), line2 = calloc(1,65536);
    cur = expand_aliases(line,line2,65536,_line); // fix: result buffer may be line2
    if ( (cur= localcommand(cur)) == 0 )
        return;
    if ( cur[0] == '!' )
        broadcastflag = 1, cur++;
    if ( (json= cJSON_Parse(cur)) != 0 )
    {
        free_json(json); // parsed only to test for valid JSON
        char *SuperNET_JSON(char *jsonstr);
        retstr = SuperNET_JSON(cur);
        printf("console.(%s) -> (%s)\n",cur,retstr);
        // NOTE(review): retstr appears heap-allocated and is never freed — confirm ownership
        return;
    } else printf("cant parse.(%s)\n",cur);
    settoken(ipaddr,cur);
    printf("expands to: %s [%s] %s\n",broadcastflag != 0 ? "broadcast": "",cur,ipaddr);
    if ( is_ipaddr(ipaddr) != 0 )
    {
        cur += strlen(ipaddr) + 1; // skip "<ipaddr> " prefix
        if ( (cmdstr = parse_expandedline(plugin,method,&timeout,cur,broadcastflag)) != 0 )
        {
            printf("ipaddr.(%s) (%s)\n",ipaddr,cur);
            printf("deprecated (%s) -> (%s)\n",cur,cmdstr);
            free(cmdstr);
        }
        return;
    }
    if ( (cmdstr= parse_expandedline(plugin,method,&timeout,cur,broadcastflag)) != 0 )
    {
        retstr = process_user_json(plugin,method,cmdstr,broadcastflag,timeout != 0 ? timeout : SUPERNET.PLUGINTIMEOUT);
        printf("CONSOLE (%s) -> (%s) -> (%s)\n",cur,cmdstr,retstr);
        free(cmdstr);
    }
}
#endif
#endif
| mezzovide/btcd | libjl777/plugins/common/console777.c | C | mit | 11,054 |
#include "icu.h"
#include "unicode/uspoof.h"
#define GET_SPOOF_CHECKER(_data) icu_spoof_checker_data* _data; \
TypedData_Get_Struct(self, icu_spoof_checker_data, &icu_spoof_checker_type, _data)
VALUE rb_cICU_SpoofChecker;
VALUE rb_mChecks;
VALUE rb_mRestrictionLevel;
/* Per-object wrapper state: the Ruby instance and its ICU service handle. */
typedef struct {
    VALUE rb_instance;          /* back-reference to the Ruby object */
    USpoofChecker* service;     /* owned ICU handle, closed on GC */
} icu_spoof_checker_data;
/* GC free hook: releases the underlying ICU USpoofChecker. */
static void spoof_checker_free(void* _this)
{
    icu_spoof_checker_data* this = _this;
    uspoof_close(this->service);
}
/* GC memsize hook: reports only the wrapper struct (ICU internals not counted). */
static size_t spoof_checker_memsize(const void* _)
{
    return sizeof(icu_spoof_checker_data);
}
/* TypedData registration for ICU::SpoofChecker instances. */
static const rb_data_type_t icu_spoof_checker_type = {
    "icu/spoof_checker",
    {NULL, spoof_checker_free, spoof_checker_memsize,},
    0, 0,
    RUBY_TYPED_FREE_IMMEDIATELY,
};
/* Allocator: wraps a zeroed icu_spoof_checker_data in a TypedData object. */
VALUE spoof_checker_alloc(VALUE self)
{
    icu_spoof_checker_data* this;
    return TypedData_Make_Struct(self, icu_spoof_checker_data, &icu_spoof_checker_type, this);
}
/*
 * SpoofChecker#initialize: opens the underlying USpoofChecker service.
 * Raises an ICU error (via icu_rb_raise_icu_error) when uspoof_open fails.
 */
VALUE spoof_checker_initialize(VALUE self)
{
    GET_SPOOF_CHECKER(this);
    this->rb_instance = self;
    this->service = NULL;   /* fix: was FALSE (an int constant), not a null pointer */
    UErrorCode status = U_ZERO_ERROR;
    this->service = uspoof_open(&status);
    if (U_FAILURE(status)) {
        icu_rb_raise_icu_error(status);
    }
    return self;
}
/* Reads the checker's URestrictionLevel and boxes it as a Ruby Integer. */
static inline VALUE spoof_checker_get_restriction_level_internal(const icu_spoof_checker_data* this)
{
    URestrictionLevel level = uspoof_getRestrictionLevel(this->service);
    return INT2NUM(level);
}
/* SpoofChecker#restriction_level: current restriction level as an Integer. */
VALUE spoof_checker_get_restriction_level(VALUE self)
{
    GET_SPOOF_CHECKER(this);
    return spoof_checker_get_restriction_level_internal(this);
}
/* SpoofChecker#restriction_level=: sets the level and returns the value
 * read back from ICU (not necessarily the argument). */
VALUE spoof_checker_set_restriction_level(VALUE self, VALUE level)
{
    GET_SPOOF_CHECKER(this);
    uspoof_setRestrictionLevel(this->service, NUM2INT(level));
    return spoof_checker_get_restriction_level_internal(this);
}
/* Reads the active check bit mask from ICU, raising on failure. */
static inline VALUE spoof_checker_get_checks_internal(const icu_spoof_checker_data* this)
{
    UErrorCode status = U_ZERO_ERROR;
    int32_t checks = uspoof_getChecks(this->service, &status);
    if (U_FAILURE(status)) {
        icu_rb_raise_icu_error(status);
    }
    return INT2NUM(checks);
}
/* SpoofChecker#checks: the enabled check bit mask as an Integer. */
VALUE spoof_checker_get_checks(VALUE self)
{
    GET_SPOOF_CHECKER(this);
    return spoof_checker_get_checks_internal(this);
}
/* SpoofChecker#checks=: sets the check bit mask, raising on an invalid
 * mask, and returns the value read back from ICU. */
VALUE spoof_checker_set_checks(VALUE self, VALUE checks)
{
    GET_SPOOF_CHECKER(this);
    UErrorCode status = U_ZERO_ERROR;
    uspoof_setChecks(this->service, NUM2INT(checks), &status);
    if (U_FAILURE(status)) {
        icu_rb_raise_icu_error(status);
    }
    return spoof_checker_get_checks_internal(this);
}
/*
 * SpoofChecker#confusable?(a, b): returns an Integer bit mask of the checks
 * under which the two strings are confusable (0 means not confusable).
 */
VALUE spoof_checker_confusable(VALUE self, VALUE str_a, VALUE str_b)
{
    StringValue(str_a);
    StringValue(str_b);
    GET_SPOOF_CHECKER(this);
    VALUE tmp_a = icu_ustring_from_rb_str(str_a);
    VALUE tmp_b = icu_ustring_from_rb_str(str_b);
    UErrorCode status = U_ZERO_ERROR;
    int32_t result = uspoof_areConfusable(this->service,
                                          icu_ustring_ptr(tmp_a),
                                          icu_ustring_len(tmp_a),
                                          icu_ustring_ptr(tmp_b),
                                          icu_ustring_len(tmp_b),
                                          &status);
    if (U_FAILURE(status)) {   /* fix: status was previously ignored, unlike every sibling */
        icu_rb_raise_icu_error(status);
    }
    return INT2NUM(result);
}
/*
 * SpoofChecker#get_skeleton(str): returns the confusability skeleton of
 * str as a Ruby string.  Uses the UChar API with a grow-once retry when
 * the first buffer is too small.
 * NOTE(review): len_bytes holds a UChar count, not bytes, per the
 * uspoof_getSkeleton contract — the resize below presumably relies on
 * icu_ustring_resize taking a UChar capacity; confirm.
 */
VALUE spoof_checker_get_skeleton(VALUE self, VALUE str)
{
    StringValue(str);
    GET_SPOOF_CHECKER(this);
    VALUE in = icu_ustring_from_rb_str(str);
    VALUE out = icu_ustring_init_with_capa_enc(icu_ustring_capa(in), ICU_RUBY_ENCODING_INDEX);
    int retried = FALSE;
    int32_t len_bytes;
    UErrorCode status = U_ZERO_ERROR;
    do {
        // UTF-8 version does the conversion internally so we relies on UChar version here!
        len_bytes = uspoof_getSkeleton(this->service, 0 /* deprecated */,
                                       icu_ustring_ptr(in), icu_ustring_len(in),
                                       icu_ustring_ptr(out), icu_ustring_capa(out),
                                       &status);
        if (!retried && status == U_BUFFER_OVERFLOW_ERROR) {
            // first attempt truncated: grow to the reported length and retry once
            retried = TRUE;
            icu_ustring_resize(out, len_bytes + RUBY_C_STRING_TERMINATOR_SIZE);
            status = U_ZERO_ERROR;
        } else if (U_FAILURE(status)) {
            icu_rb_raise_icu_error(status);
        } else { // retried == true && U_SUCCESS(status)
            break;
        }
    } while (retried);
    return icu_ustring_to_rb_enc_str_with_len(out, len_bytes);
}
/*
 * SpoofChecker#check(str): runs the enabled checks against str and returns
 * the Integer bit mask of failed checks (0 means the string passed).
 * Uses the UTF-8 fast path when the Ruby string is already UTF-8.
 */
VALUE spoof_checker_check(VALUE self, VALUE rb_str)
{
    StringValue(rb_str);
    GET_SPOOF_CHECKER(this);
    UErrorCode status = U_ZERO_ERROR;
    int32_t result = 0;
    // TODO: Migrate to uspoof_check2UTF8 once it's not draft
    if (icu_is_rb_str_as_utf_8(rb_str)) {
        result = uspoof_checkUTF8(this->service,
                                  RSTRING_PTR(rb_str),
                                  RSTRING_LENINT(rb_str),
                                  NULL,
                                  &status);
    } else {
        // other encodings go through a UChar conversion first
        VALUE in = icu_ustring_from_rb_str(rb_str);
        // TODO: Migrate to uspoof_check once it's not draft
        result = uspoof_check(this->service,
                              icu_ustring_ptr(in),
                              icu_ustring_len(in),
                              NULL,
                              &status);
    }
    if (U_FAILURE(status)) {
        icu_rb_raise_icu_error(status);
    }
    return INT2NUM(result);
}
/* Instance-variable name used to memoize the checks hash on the class. */
static const char* k_checks_name = "@checks";
/* SpoofChecker.available_checks: symbol -> USPOOF_* flag hash, built once
 * and cached in a class-level ivar. */
VALUE spoof_checker_available_checks(VALUE klass)
{
    VALUE iv = rb_iv_get(klass, k_checks_name);
    if (NIL_P(iv)) {
        iv = rb_hash_new();
        rb_hash_aset(iv, ID2SYM(rb_intern("single_script_confusable")), INT2NUM(USPOOF_SINGLE_SCRIPT_CONFUSABLE));
        rb_hash_aset(iv, ID2SYM(rb_intern("mixed_script_confusable")), INT2NUM(USPOOF_MIXED_SCRIPT_CONFUSABLE));
        rb_hash_aset(iv, ID2SYM(rb_intern("whole_script_confusable")), INT2NUM(USPOOF_WHOLE_SCRIPT_CONFUSABLE));
        rb_hash_aset(iv, ID2SYM(rb_intern("confusable")), INT2NUM(USPOOF_CONFUSABLE));
        // USPOOF_ANY_CASE deprecated in 58
        rb_hash_aset(iv, ID2SYM(rb_intern("restriction_level")), INT2NUM(USPOOF_RESTRICTION_LEVEL));
        // USPOOF_SINGLE_SCRIPT deprecated in 51
        rb_hash_aset(iv, ID2SYM(rb_intern("invisible")), INT2NUM(USPOOF_INVISIBLE));
        rb_hash_aset(iv, ID2SYM(rb_intern("char_limit")), INT2NUM(USPOOF_CHAR_LIMIT));
        rb_hash_aset(iv, ID2SYM(rb_intern("mixed_numbers")), INT2NUM(USPOOF_MIXED_NUMBERS));
        rb_hash_aset(iv, ID2SYM(rb_intern("all_checks")), INT2NUM(USPOOF_ALL_CHECKS));
        rb_hash_aset(iv, ID2SYM(rb_intern("aux_info")), INT2NUM(USPOOF_AUX_INFO));
        rb_iv_set(klass, k_checks_name, iv);
    }
    return iv;
}
/* Instance-variable name used to memoize the restriction-level hash. */
static const char* k_restriction_level_name = "@restriction_levels";
/* SpoofChecker.available_restriction_levels: symbol -> USPOOF_* level hash,
 * built once and cached in a class-level ivar. */
VALUE spoof_checker_available_restriction_levels(VALUE klass)
{
    VALUE iv = rb_iv_get(klass, k_restriction_level_name);
    if (NIL_P(iv)) {
        iv = rb_hash_new();
        rb_hash_aset(iv, ID2SYM(rb_intern("ascii")), INT2NUM(USPOOF_ASCII));
        rb_hash_aset(iv, ID2SYM(rb_intern("single_script_restrictive")), INT2NUM(USPOOF_SINGLE_SCRIPT_RESTRICTIVE));
        rb_hash_aset(iv, ID2SYM(rb_intern("highly_restrictive")), INT2NUM(USPOOF_HIGHLY_RESTRICTIVE));
        rb_hash_aset(iv, ID2SYM(rb_intern("moderately_restrictive")), INT2NUM(USPOOF_MODERATELY_RESTRICTIVE));
        rb_hash_aset(iv, ID2SYM(rb_intern("minimally_restrictive")), INT2NUM(USPOOF_MINIMALLY_RESTRICTIVE));
        rb_hash_aset(iv, ID2SYM(rb_intern("unrestrictive")), INT2NUM(USPOOF_UNRESTRICTIVE));
        rb_hash_aset(iv, ID2SYM(rb_intern("restriction_level_mask")), INT2NUM(USPOOF_RESTRICTION_LEVEL_MASK));
        rb_hash_aset(iv, ID2SYM(rb_intern("undefined_restrictive")), INT2NUM(USPOOF_UNDEFINED_RESTRICTIVE));
        rb_iv_set(klass, k_restriction_level_name, iv);
    }
    return iv;
}
/* Extension init: defines ICU::SpoofChecker and wires up its methods. */
void init_icu_spoof_checker(void)
{
    rb_cICU_SpoofChecker = rb_define_class_under(rb_mICU, "SpoofChecker", rb_cObject);
    rb_define_singleton_method(rb_cICU_SpoofChecker, "available_checks", spoof_checker_available_checks, 0);
    rb_define_singleton_method(rb_cICU_SpoofChecker, "available_restriction_levels", spoof_checker_available_restriction_levels, 0);
    rb_define_alloc_func(rb_cICU_SpoofChecker, spoof_checker_alloc);
    rb_define_method(rb_cICU_SpoofChecker, "initialize", spoof_checker_initialize, 0);
    rb_define_method(rb_cICU_SpoofChecker, "restriction_level", spoof_checker_get_restriction_level, 0);
    rb_define_method(rb_cICU_SpoofChecker, "restriction_level=", spoof_checker_set_restriction_level, 1);
    rb_define_method(rb_cICU_SpoofChecker, "check", spoof_checker_check, 1);
    rb_define_method(rb_cICU_SpoofChecker, "checks", spoof_checker_get_checks, 0);
    rb_define_method(rb_cICU_SpoofChecker, "checks=", spoof_checker_set_checks, 1);
    rb_define_method(rb_cICU_SpoofChecker, "confusable?", spoof_checker_confusable, 2);
    rb_define_method(rb_cICU_SpoofChecker, "get_skeleton", spoof_checker_get_skeleton, 1);
}
#undef DEFINE_SPOOF_ENUM_CONST
#undef GET_SPOOF_CHECKER
/* vim: set expandtab sws=4 sw=4: */
| fantasticfears/icu4r | ext/icu/icu_spoof_checker.c | C | mit | 9,203 |
/*
* Simple Vulkan application
*
* Copyright (c) 2016 by Mathias Johansson
*
* This code is licensed under the MIT license
* https://opensource.org/licenses/MIT
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include "util/vulkan.h"
#include "util/window.h"
int main() {
// Create an instance of vulkan
createInstance("Vulkan");
setupDebugging();
getDevice();
openWindow();
createCommandPool();
createCommandBuffer();
prepRender();
beginCommands();
VkClearColorValue clearColor = {
.uint32 = {1, 0, 0, 1}
};
VkImageMemoryBarrier preImageBarrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, NULL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
| VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
| VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_GENERAL, queueFam,
queueFam, swapImages[nextImage],
swapViewInfos[nextImage].subresourceRange
};
vkCmdPipelineBarrier(
comBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, NULL, 0, NULL, 1, &preImageBarrier
);
vkCmdClearColorImage(
comBuffer, swapImages[nextImage], VK_IMAGE_LAYOUT_GENERAL,
&clearColor, 1, &swapViewInfos[nextImage].subresourceRange
);
VkImageMemoryBarrier postImageBarrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, NULL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
| VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
| VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_GENERAL,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED, swapImages[nextImage],
swapViewInfos[nextImage].subresourceRange
};
vkCmdPipelineBarrier(
comBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, 0, NULL, 0, NULL, 1, &postImageBarrier
);
endCommands();
submitCommandBuffer();
tickWindow();
sleep(3);
// DESTROY
destroyInstance();
quitWindow();
return 0;
}
| Mathias9807/Vulkan-Demo | src/main.c | C | mit | 1,993 |
#include <stdio.h>
#include <stdlib.h>
/* One fixed-size record of the binary employee database. */
struct Employee {
    unsigned int id;    // unique positive id; 0 marks a logically deleted record
    char name[256];
    char gender;        // 'm' or 'f'
    float salary;
};
/*
 * Reads a new employee record from stdin and appends it to the open binary
 * database f (opened "ab+": reads scan the existing records, writes append).
 * Rejects non-positive ids, ids already in use, and genders other than
 * 'm'/'f'.
 */
void addEmployee(FILE *f) {
    struct Employee emp;
    printf("Adding a new employee, please type his id \n");
    int id;
    scanf("%d", &id);
    if (id > 0) {
        while (1) { //search if id already in use
            struct Employee tmp;
            fread(&tmp, sizeof(struct Employee), 1, f);
            if (feof(f) != 0) { //end of file
                emp.id = id;
                break;
            }
            if (id == tmp.id) {
                printf("Id already in use, id must be unique \n");
                return;
            } else {
                emp.id = id;
            }
        }
    } else {
        printf("Id must be greater than 0 \n");
        return;
    }
    printf("Please type his name \n");
    scanf("%255s", emp.name);   /* fix: was scanf("%s", &emp.name) — wrong pointer type and unbounded */
    printf("Please type his gender (m or f) \n");
    scanf(" %c", &emp.gender);
    if ((emp.gender != 'm') && (emp.gender != 'f')) {
        printf("Gender should be 'm' or 'f'");
        return;
    }
    printf("Please type his salary \n");
    scanf("%f", &emp.salary);
    fwrite(&emp, sizeof(struct Employee), 1, f);
}
/*
 * Logically deletes an employee: prompts for an id, scans the open
 * database f (mode "rb+") and overwrites the matching record's id with 0.
 */
void removeEmployee(FILE *f) {
    printf("Removing employee, please type his id \n");
    int id;
    scanf("%d", &id);   /* fix: format was "%d)" — the stray ')' broke input matching */
    while (1) {
        struct Employee tmp;
        fread(&tmp, sizeof(struct Employee), 1, f);
        if (feof(f) != 0) {
            printf("Employee not found");
            return;
        }
        if (id == tmp.id) {
            /* rewind one record; fix: -sizeof(...) underflows as size_t, cast first */
            fseek(f, -(long)sizeof(struct Employee), SEEK_CUR);
            tmp.id = 0;   /* id 0 marks a deleted record */
            fwrite(&tmp, sizeof(struct Employee), 1, f);
            printf("Success \n");   /* fix: was misspelled "Sucess" */
            return;
        }
    }
}
/*
 * Prints the average salary for male and female employees, skipping
 * deleted records (id == 0).  f must be open for binary reading.
 */
void calculateAvarageSalaryByGender(FILE *f) {
    printf("Calculating the avarage salary by gender \n");
    int maleNumber = 0;
    int femaleNumber = 0;
    float sumMale = 0;
    float sumFemale = 0;
    while (1) {
        struct Employee tmp;
        fread(&tmp, sizeof(struct Employee), 1, f);
        if (feof(f) != 0)
            break;
        if (tmp.id == 0)   /* deleted record */
            continue;
        if (tmp.gender == 'm') {
            maleNumber++;
            sumMale += tmp.salary;
        } else {
            femaleNumber++;
            sumFemale += tmp.salary;
        }
    }
    /* fix: guard against dividing by zero when a gender has no employees */
    if (maleNumber > 0)
        printf("Avarage male salary: %f \n", sumMale/maleNumber);
    else
        printf("No male employees \n");
    if (femaleNumber > 0)
        printf("Avarage female salary: %f \n", sumFemale/femaleNumber);
    else
        printf("No female employees \n");
}
/*
 * Dumps all non-deleted records of the open binary database f to a
 * user-named text file, one labelled field per line.
 */
void exportTextFile(FILE *f) {
    char path[256];
    printf("Please type the name of the file to store the data \n");
    scanf("%255s", path);   /* fix: was scanf("%s)", &path) — stray ')', wrong pointer type, unbounded */
    FILE *final;
    if ((final = fopen(path, "w")) == NULL) {
        printf("Error opening/creating the file");
    } else {
        while (1) {
            struct Employee tmp;
            fread(&tmp, sizeof(struct Employee), 1, f);
            if (feof(f) != 0)
                break;
            if (tmp.id != 0) {   /* skip deleted records */
                fprintf(final, "ID: %d \n", tmp.id);
                fprintf(final, "Name: %s \n", tmp.name);
                fprintf(final, "Gender: %c \n", tmp.gender);
                fprintf(final, "Salary: %f \n", tmp.salary);
            }
        }
        fclose(final);   /* fix: was called even when fopen failed — fclose(NULL) is UB */
    }
}
/*
 * Compacts the database: copies every non-deleted record of f into a
 * temporary file, then replaces fileName with it.
 * NOTE(review): f (opened on fileName by the caller) is still open while
 * remove/rename run — fine on POSIX, fails on Windows; confirm target OS.
 */
void compactData(FILE *f, char fileName[]) {
    FILE *copy;
    if ((copy = fopen("copy", "wb")) == NULL) {
        printf("Error creating the copy file");
    } else {
        while (1) {
            struct Employee tmp;
            fread(&tmp, sizeof(struct Employee), 1, f);
            if (feof(f) != 0)
                break;
            if (tmp.id != 0) {   /* drop deleted records */
                fwrite(&tmp, sizeof(struct Employee), 1, copy);
            }
        }
        fclose(copy);
        remove(fileName);
        rename("copy", fileName);
        printf("Database compacted");   /* fix: was printed even when fopen failed */
    }
}
/*
 * CLI entry point: argv[1] is the binary database file, argv[2] selects
 * the operation (1..5).  Each operation reopens the file in the mode it
 * needs.  Prints usage when the argument count is wrong.
 */
int main(int argc, char *argv[]) {
    if (argc == 3) {
        int option = atoi(argv[2]);
        FILE *f;
        /* touch the database so later "rb"/"rb+" opens succeed on first run */
        f = fopen(argv[1], "ab+");
        if (f != NULL)   /* fix: fclose(NULL) is undefined behavior */
            fclose(f);
        switch(option) {
            case 1:
                if ((f = fopen(argv[1], "ab+")) == NULL) {
                    printf("Error opening/creating the file");
                } else {
                    addEmployee(f);
                    fclose(f);
                }
                break;
            case 2:
                if ((f = fopen(argv[1], "rb+")) == NULL) {
                    printf("Error opening/creating the file");
                } else {
                    removeEmployee(f);
                    fclose(f);
                }
                break;
            case 3:
                if ((f = fopen(argv[1], "rb")) == NULL) {
                    printf("Error opening/creating the file");
                } else {
                    calculateAvarageSalaryByGender(f);
                    fclose(f);
                }
                break;
            case 4:
                if ((f = fopen(argv[1], "rb")) == NULL) {
                    printf("Error opening/creating the file");
                } else {
                    exportTextFile(f);
                    fclose(f);
                }
                break;
            case 5:
                if ((f = fopen(argv[1], "rb")) == NULL) {
                    printf("Error opening/creating the file");
                } else {
                    compactData(f, argv[1]);
                    fclose(f);
                }
                break;
            default:   /* fix: unknown options previously fell through silently */
                printf("Unknown option \n");
                break;
        }
    } else {
        printf("Need to provide two arguments, the first one is the binary file and second is the option. \n");
        printf("1 - Add employee \n");
        printf("2 - Remove employee \n");
        printf("3 - Calculate avarage salary by gender \n");
        printf("4 - Export data to a text file \n");
        printf("5 - Compact data \n");
    }
    return 0;
}
| Macelai/operating-systems | system-call/main.c | C | mit | 4,505 |
//Generated by the Argon Build System
/***********************************************************************
Copyright (c) 2006-2011, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "opus/silk/float/main_FLP.h"
/* Autocorrelations for a warped frequency axis */
/*
 * Computes autocorrelations of input on a warped (allpass-transformed)
 * frequency axis by feeding each sample through a cascade of order/2
 * first-order allpass section pairs and correlating each tap's output
 * with the cascade input.  corr[0..order] receives the result.
 * Accumulation is done in double and narrowed to silk_float at the end.
 * NOTE(review): the 17-zero initializers assume MAX_SHAPE_LPC_ORDER == 16
 * — confirm against the project headers.
 */
void silk_warped_autocorrelation_FLP(
    silk_float *corr, /* O Result [order + 1] */
    const silk_float *input, /* I Input data to correlate */
    const silk_float warping, /* I Warping coefficient */
    const opus_int length, /* I Length of input */
    const opus_int order /* I Correlation order (even) */
)
{
    opus_int n, i;
    double tmp1, tmp2;
    double state[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
    double C[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
    /* Order must be even */
    silk_assert( ( order & 1 ) == 0 );
    /* Loop over samples */
    for( n = 0; n < length; n++ ) {
        tmp1 = input[ n ];
        /* Loop over allpass sections */
        for( i = 0; i < order; i += 2 ) {
            /* Output of allpass section */
            tmp2 = state[ i ] + warping * ( state[ i + 1 ] - tmp1 );
            state[ i ] = tmp1;
            C[ i ] += state[ 0 ] * tmp1; /* correlate tap i against the cascade input state[0] */
            /* Output of allpass section */
            tmp1 = state[ i + 1 ] + warping * ( state[ i + 2 ] - tmp2 );
            state[ i + 1 ] = tmp2;
            C[ i + 1 ] += state[ 0 ] * tmp2;
        }
        state[ order ] = tmp1;
        C[ order ] += state[ 0 ] * tmp1;
    }
    /* Copy correlations in silk_float output format */
    for( i = 0; i < order + 1; i++ ) {
        corr[ i ] = ( silk_float )C[ i ];
    }
}
| skylersaleh/ArgonEngine | common/opus/silk/float/warped_autocorrelation_FLP.c | C | mit | 3,596 |
/*
Fontname: -Adobe-Times-Medium-R-Normal--11-80-100-100-P-54-ISO10646-1
Copyright: Copyright (c) 1984, 1987 Adobe Systems Incorporated. All Rights Reserved. Copyright (c) 1988, 1991 Digital Equipment Corporation. All Rights Reserved.
Glyphs: 191/913
BBX Build Mode: 0
*/
/* Generated u8g2 glyph stream for Times Roman 8px (transparent, full charset).
 * The string below is a binary font blob (header + per-glyph bitmap records)
 * decoded at runtime by u8g2; every byte is significant. Do not edit by hand —
 * regenerate from the BDF source named in the header comment above. */
const uint8_t u8g2_font_timR08_tf[2170] U8G2_FONT_SECTION("u8g2_font_timR08_tf") =
"\277\0\3\2\4\4\2\4\5\13\15\377\375\7\376\7\376\1H\2\277\10a \5\0\246\4!\7q\343"
"\304\240\4\42\7#\66E\242\4#\16ubM)I\6\245\62(\245$\1$\14\224^U\64D\242"
"i\310\22\0%\21w\42\316\60$Q\322\226$RRJ\332\22\0&\20x\42\226\232\244\312\264D\221"
"\226)\311\242\0'\6!\266\204\0(\13\223\32U\22%Q-\312\2)\14\223\32E\26eQ%J"
"\42\0*\7\63sE\322\1+\12U\242U\30\15R\30\1,\6\42\336\204\22-\6\23*\305\0."
"\6\21\343D\0/\12s\342T%\252D\21\0\60\12tb\215\22yJ\24\0\61\10scM\42u"
"\31\62\14tb\215\22eQ\26EC\0\63\14tb\215\22e\211\230\15\11\0\64\14ub]&%"
"\245d\320\302\4\65\13tb\315\22\215Y\66$\0\66\13tb\225\22-\221)Q\0\67\12tb\305"
" \325\242\254\4\70\14tb\215\22I\211\22I\211\2\71\13tb\215\22\231\222)\221\0:\6Q\343"
"D\26;\7b\336L\254\4<\7ScUR+=\10\65\246\305\240\16\2>\10SbEV)\1"
"?\13s\42\305\220DI\224&\0@\24\230Z\326\220\205I\264(Q\242D\211\222\230\332\221A\1A"
"\15w\42^:&a\222\15R\226\34B\15u\242\305\20U\242d\252D\203\2C\14v\342\315\60d"
"jUK\206\4D\20v\342\305 EJ\226dI\226D\303\220\0E\14u\242\305\240DI\70\205\321"
"\60F\14u\242\305\240DI\70\205\331\4G\16v\342\315\60dj\64\204[\62$\0H\17w\42\306"
"\262dQ\26\15R\26e\311\1I\10s\42\305\22u\31J\12t\42\315\224\265T$\0K\16v\342"
"\305\242D\225LL\262(Y\4L\11u\242\305\26v\32\6M\21y\242\306\266hR\322\224\64%K"
"\324\262$\3N\20w\42\306\262H\225RR\212\244H\231\22\0O\14v\342\315\220H\243qR\206\4"
"P\14u\242\305\20U\242d\12\263\11Q\14\226\332\315\220H\243qRV\3R\15v\342\305\220EM"
"[\222E\311\42S\13tb\315\20m\242\64$\0T\12u\242\305\245\24\266-\0U\17w\42\306\262"
"dQ\26eQ\26)\332\4V\17w\42\306\262d\221\242%a\222\306\31\0W\22{\241\306r\311J"
"R\244%K\230\264fq\226\1X\14w\42\306\262d\225\264\222U\16Y\14w\42\306\262d\225p\215"
"\323\11Z\10u\242\305\255\267a[\10\222\332\304\322\27\1\134\11s\342D\224E\265(]\10\222\332\204"
"\322\227\1^\7\63sME\11_\6\25V\305 `\6\42\372D\24a\10S\42\205\226\34\2b\14"
"ua\205\30N\225(\211\222\5c\7S\42\315T\23d\15ub\225\30-Q\22%Q\244\4e\7"
"S\42\315%\23f\12t\42\225\22MYi\1g\14tZ\315\220\64eJ\64$\0h\13va\205"
"\232nQO\212\0i\7r\342L\244tj\11\223\331T(uZ\0k\13va\205Z\252d[T"
"\22l\10s\42\205\324\313\0m\14X\42\306\42\265D\225\250\342\0n\12Ub\305T\211\222(\61o"
"\11Tb\215\22\231\22\5p\16\205U\305\20U\242$J\246\60\233\0q\14\205V\315\22%Q\22E"
"cmr\11S\42E\222(Q\62s\10S\42\315\226\15\1t\11d\42M\64eE\1u\14Ub"
"\205\62%Q\22EJ\0v\14Va\305\242DY\222\251\21\0w\15Y!\306b\211jIQQ\223"
"\14x\12U\242\205\242\324*\211\1y\14vY\205\262D\225P\14\65\21z\11Tb\305\20\65\15\2"
"{\12\223\32U\22U\262\250\26|\6\221\232\304\3}\13\223\32E\26\325\222\250\22\1~\7&\352\215"
"d\1\240\5\0\246\4\241\7q\333D\62\10\242\14t^U\64DZ\224H\21\0\243\14ub\225\224"
"D\331\226I\203\2\244\14efE\226LI\224DK\26\245\15ubE\226TL\321 e\13\0\246"
"\6q\242\304\62\247\17\224Z\315\20%QRJJI\64$\0\250\6\23wE\22\251\16wc\326V"
"\211\24%\223\224Z\66\1\252\10S*\205\246d\3\253\11DfM\242\64%\1\254\7%\347\305 \6"
"\255\6\23*\305\0\256\15wc\326VI\26\223\322-\233\0\257\6\23\66\305\0\260\11D.\215\22I"
"\211\2\261\14u\242U\30\15R\230\3\203\0\262\7C\356\314R\31\263\10C\356\304\222-\0\264\6\42"
"\366\214\2\265\16uZE\224DI\224D\311\42\206\0\266\23\226\232\315\60(\311\222,\221\222%Y\222"
"%Y\222\0\267\6\21\252D\0\270\7\63\26M\266\0\271\10C\356L\42%\3\272\7S*M\307\1"
"\273\12DfE\22%\25%\1\274\21w\42N\226HY\24\15I\226h\203\222%\0\275\17w\42N"
"\226HY\24\15\211\224\224\232\6\276\17w\42\306\324\230DJ-\321\6%K\0\277\12s\32M\32%"
"Q\62\4\300\20\247\42V\16\344p:&a\222\15R\226\34\301\17\247\42f\232\303\351\230\204I\66H"
"Yr\302\20\247\42^\232\344h:&a\222\15R\226\34\303\21\247\42^\222%\71\232\216I\230d\203"
"\224%\7\304\17\227\42V\222\243\351\230\204I\66HYr\305\20\247\42^\232\244q:&a\222\15R"
"\226\34\306\20xb\336 M\225\64\231\206\60\212\206d\10\307\17\246\326\315\60djUK\206,\316\64"
"\0\310\17\245\242M\232\3\203\22%\341\24F\303\0\311\16\245\242]\35\30\224(\11\247\60\32\6\312\17"
"\245\242U\226\304\203\22%\341\24F\303\0\313\15\225\242MyP\242$\234\302h\30\314\12\243\42E\26"
".Q\227\1\315\12\243\42U\22.Q\227\1\316\11\243\42M\333\22u\31\317\12\223\42E\222-Q\227"
"\1\320\20v\342\305 EJ\66DI\226D\303\220\0\321\23\247\42^\222%\71$-R$UJI"
")R\246\4\322\17\246\342U\234CC\42\215\306I\31\22\0\323\17\246\342]\230cC\42\215\306I\31"
"\22\0\324\17\246\342]\230\344\310\220H\243qR\206\4\325\20\246\342U\22%\71\64$\322h\234\224!"
"\1\326\17\226\342M\224#C\42\215\306I\31\22\0\327\12U\242E\226\324*\265\0\330\22\230\35>\20"
"\15\222\251\22\205Q\22E\246A\312\1\331\22\247\42V\16\344\330\262dQ\26eQ\26)\332\4\332\22"
"\247\42f\232c\313\222EY\224EY\244h\23\0\333\22\247\42^\232\344\320\262dQ\26eQ\26)"
"\332\4\334\22\227\42V\222C\313\222EY\224EY\244h\23\0\335\16\247\42f\232c\226\254\22\256q"
":\1\336\13u\242\305\26N\225)\233\0\337\13tbU\245EJ*C\2\340\11\203\42E\26j\311"
"!\341\11\203\42U\22j\311!\342\11\203\42M\233\226\34\2\343\14\204\42M\242\244b\244T\26\0\344"
"\11s\42E\222i\311!\345\11\203\42M\27-\71\4\346\13U\242\205\42%\311RR\4\347\11\203\26"
"\315TS\262\5\350\11\203\42E\226.K&\351\11\203\42U\222.K&\352\11\203\42M\343\262d\2"
"\353\11s\42E\22.K&\354\10\202\342D\24)\35\355\11\203\342T\22J]\0\356\10\203\342L\233"
"\324\5\357\11s\342D\222I]\0\360\14\204bM\66$\321\20\231\22\5\361\14\205bMw`\252D"
"I\224\30\362\13\204bM\30+\221)Q\0\363\13\204bU\35P\42S\242\0\364\14\204bM\224\304"
"JdJ\24\0\365\14\204bM\242\304JdJ\24\0\366\13tbE\22+\221)Q\0\367\12U\242"
"U\16\14:\20\1\370\12v]m\64\365iJ\1\371\17\205bM\232\3Q\22%Q\22EJ\0\372"
"\17\205bU\226#Q\22%Q\22EJ\0\373\17\205bU\226\304Q\22%Q\22EJ\0\374\15u"
"bM\71J\242$J\242H\11\375\17\246Y]\230C\312\22UB\61\324D\0\376\17\245U\205\30N"
"\225(\211\222)\314&\0\377\16\226YM\35Q\226\250\22\212\241&\2\0\0\0";
| WiseLabCMU/gridballast | Source/framework/main/u8g2/tools/font/build/single_font_files/u8g2_font_timR08_tf.c | C | mit | 6,149 |
/*
* @brief This file contains USB HID Keyboard example using USB ROM Drivers.
*
* @note
* Copyright(C) NXP Semiconductors, 2013
* All rights reserved.
*
* @par
* Software that is described herein is for illustrative purposes only
* which provides customers with programming information regarding the
* LPC products. This software is supplied "AS IS" without any warranties of
* any kind, and NXP Semiconductors and its licensor disclaim any and
* all warranties, express or implied, including all implied warranties of
* merchantability, fitness for a particular purpose and non-infringement of
* intellectual property rights. NXP Semiconductors assumes no responsibility
* or liability for the use of the software, conveys no license or rights under any
* patent, copyright, mask work right, or any other intellectual property rights in
* or to any products. NXP Semiconductors reserves the right to make changes
* in the software without notification. NXP Semiconductors also makes no
* representation or warranty that such application will be suitable for the
* specified use without further testing or modification.
*
* @par
* Permission to use, copy, modify, and distribute this software and its
* documentation is hereby granted, under NXP Semiconductors' and its
* licensor's relevant copyrights in the software, without fee, provided that it
* is used in conjunction with NXP Semiconductors microcontrollers. This
* copyright, permission, and disclaimer notice must appear in all copies of
* this code.
*/
#include "board.h"
#include <stdint.h>
#include <string.h>
#include "usbd_rom_api.h"
#include "hid_keyboard.h"
#include "ms_timer.h"
/*****************************************************************************
* Private types/enumerations/variables
****************************************************************************/
/**
 * @brief Structure to hold Keyboard data
 *
 * Bundles everything the HID keyboard function driver needs across calls:
 * the stack handle, the most recent input report, the in-flight flag for the
 * interrupt-IN endpoint, and the pacing timer for periodic reports.
 */
typedef struct {
	USBD_HANDLE_T hUsb;	/*!< Handle to USB stack. */
	uint8_t report[KEYBOARD_REPORT_SIZE];	/*!< Last report data  */
	uint8_t tx_busy;	/*!< Flag indicating whether a report is pending in endpoint queue. */
	ms_timer_t tmo;		/*!< Timer to track when to send next report. */
} Keyboard_Ctrl_T;

/** Singleton instance of Keyboard control */
static Keyboard_Ctrl_T g_keyBoard;
/*****************************************************************************
* Public types/enumerations/variables
****************************************************************************/
extern const uint8_t Keyboard_ReportDescriptor[];
extern const uint16_t Keyboard_ReportDescSize;
/*****************************************************************************
* Private functions
****************************************************************************/
/* Routine to update keyboard state */
/* Refresh the keyboard input report from the current joystick state.
 * The report is cleared first, then a single HID usage code is set for
 * whichever joystick event (press or direction) is active; with no
 * activity the report stays empty (no keys pressed). */
static void Keyboard_UpdateReport(void)
{
	uint8_t joy = Joystick_GetStatus();

	HID_KEYBOARD_CLEAR_REPORT(&g_keyBoard.report[0]);

	if (joy == JOY_PRESS) {
		HID_KEYBOARD_REPORT_SET_KEY_PRESS(g_keyBoard.report, 0x53);
	}
	else if (joy == JOY_LEFT) {
		HID_KEYBOARD_REPORT_SET_KEY_PRESS(g_keyBoard.report, 0x5C);
	}
	else if (joy == JOY_RIGHT) {
		HID_KEYBOARD_REPORT_SET_KEY_PRESS(g_keyBoard.report, 0x5E);
	}
	else if (joy == JOY_UP) {
		HID_KEYBOARD_REPORT_SET_KEY_PRESS(g_keyBoard.report, 0x60);
	}
	else if (joy == JOY_DOWN) {
		HID_KEYBOARD_REPORT_SET_KEY_PRESS(g_keyBoard.report, 0x5A);
	}
}
/* HID Get Report Request Callback. Called automatically on HID Get Report Request */
/* HID Get Report request callback: the host polls for a report over the
 * control endpoint. Only input reports are supported; output and feature
 * report requests are stalled. Any other report type is acknowledged
 * without data (same as the original switch falling through). */
static ErrorCode_t Keyboard_GetReport(USBD_HANDLE_T hHid,
									  USB_SETUP_PACKET *pSetup,
									  uint8_t * *pBuffer,
									  uint16_t *plength)
{
	uint8_t report_type = pSetup->wValue.WB.H;	/* ReportID would be in WB.L */

	if ((report_type == HID_REPORT_OUTPUT) || (report_type == HID_REPORT_FEATURE)) {
		return ERR_USBD_STALL;	/* Not Supported */
	}
	if (report_type == HID_REPORT_INPUT) {
		/* Refresh from the joystick and hand the host a copy. */
		Keyboard_UpdateReport();
		memcpy(*pBuffer, &g_keyBoard.report[0], KEYBOARD_REPORT_SIZE);
		*plength = KEYBOARD_REPORT_SIZE;
	}
	return LPC_OK;
}
/* HID Set Report Request Callback. Called automatically on HID Set Report Request */
/* HID Set Report request callback: the host pushes a report to the device.
 * Only output reports are supported; bit 0 of the output report (NUM LOCK
 * LED state) drives board LED 0. Input and feature reports are stalled. */
static ErrorCode_t Keyboard_SetReport(USBD_HANDLE_T hHid, USB_SETUP_PACKET *pSetup, uint8_t * *pBuffer, uint16_t length)
{
	/* Zero-length transfer carries no report data: nothing to do.
	 * (The standard EP0 buffer is reused for the payload.) */
	if (length == 0) {
		return LPC_OK;
	}

	uint8_t report_type = pSetup->wValue.WB.H;	/* ReportID would be in WB.L */

	if ((report_type == HID_REPORT_INPUT) || (report_type == HID_REPORT_FEATURE)) {
		return ERR_USBD_STALL;	/* Not Supported */
	}
	if (report_type == HID_REPORT_OUTPUT) {
		/* Mirror the host's NUM LOCK LED bit onto LED #0. */
		Board_LED_Set(0, (**pBuffer & 0x01) ? 1 : 0);
	}
	return LPC_OK;
}
/* HID interrupt IN endpoint handler */
/* Interrupt-IN endpoint handler: a USB_EVT_IN event means the previously
 * queued report has been transmitted, so clear the busy flag to allow
 * Keyboard_Tasks() to queue the next one. Other events are ignored. */
static ErrorCode_t Keyboard_EpIN_Hdlr(USBD_HANDLE_T hUsb, void *data, uint32_t event)
{
	if (event == USB_EVT_IN) {
		g_keyBoard.tx_busy = 0;
	}
	return LPC_OK;
}
/*****************************************************************************
* Public functions
****************************************************************************/
/* HID keyboard init routine */
/* HID keyboard init routine.
 *
 * Validates the interface descriptor, initializes the joystick that serves
 * as the key source, registers this HID function (report descriptor plus
 * Get/Set report and IN-endpoint callbacks) with the USB ROM stack, and
 * arms the periodic report timer.
 *
 * @param hUsb       Handle to the USB stack instance (stored for later use).
 * @param pIntfDesc  HID interface descriptor from the configuration descriptor.
 * @param mem_base   In/out: base of free memory for the HID driver; advanced
 *                   past whatever hid->init() consumed.
 * @param mem_size   In/out: remaining size of that memory region.
 * @return LPC_OK on success, ERR_FAILED for a bad descriptor, or the error
 *         code returned by USBD_API->hid->init().
 */
ErrorCode_t Keyboard_init(USBD_HANDLE_T hUsb,
						  USB_INTERFACE_DESCRIPTOR *pIntfDesc,
						  uint32_t *mem_base,
						  uint32_t *mem_size)
{
	USBD_HID_INIT_PARAM_T hid_param;
	USB_HID_REPORT_T reports_data[1];
	ErrorCode_t ret = LPC_OK;

	/* Do a quick check of if the interface descriptor passed is the right one. */
	if ((pIntfDesc == 0) || (pIntfDesc->bInterfaceClass != USB_DEVICE_CLASS_HUMAN_INTERFACE)) {
		return ERR_FAILED;
	}

	/* init joystick control */
	Board_Joystick_Init();

	/* Init HID params */
	memset((void *) &hid_param, 0, sizeof(USBD_HID_INIT_PARAM_T));
	hid_param.max_reports = 1;
	hid_param.mem_base = *mem_base;
	hid_param.mem_size = *mem_size;
	hid_param.intf_desc = (uint8_t *) pIntfDesc;
	/* user defined functions */
	hid_param.HID_GetReport = Keyboard_GetReport;
	hid_param.HID_SetReport = Keyboard_SetReport;
	hid_param.HID_EpIn_Hdlr  = Keyboard_EpIN_Hdlr;
	/* Init reports_data */
	reports_data[0].len = Keyboard_ReportDescSize;
	reports_data[0].idle_time = 0;
	reports_data[0].desc = (uint8_t *) &Keyboard_ReportDescriptor[0];
	hid_param.report_data  = reports_data;

	ret = USBD_API->hid->init(hUsb, &hid_param);

	/* update memory variables: hid->init() advanced past the memory it used */
	*mem_base = hid_param.mem_base;
	*mem_size = hid_param.mem_size;

	/* store stack handle for later use. */
	g_keyBoard.hUsb = hUsb;

	/* start the keyboard report interval timer (comment previously said
	 * "mouse" — this is the keyboard report pacer) */
	ms_timerInit(&g_keyBoard.tmo, HID_KEYBRD_REPORT_INTERVAL_MS);

	return ret;
}
/* Keyboard tasks */
/* Periodic keyboard task: once per report interval, refresh the input
 * report from the joystick and, if the device is configured and no
 * transfer is already pending, queue it on the interrupt-IN endpoint. */
void Keyboard_Tasks(void)
{
	/* Nothing to do until the report interval timer fires. */
	if (!ms_timerExpired(&g_keyBoard.tmo)) {
		return;
	}

	/* Re-arm the timer for the next interval. */
	ms_timerStart(&g_keyBoard.tmo);

	/* Reports may only be sent once the device is configured. */
	if (!USB_IsConfigured(g_keyBoard.hUsb)) {
		return;
	}

	/* Update report based on board state. */
	Keyboard_UpdateReport();

	/* Queue the report unless the previous one is still in flight. */
	if (g_keyBoard.tx_busy == 0) {
		g_keyBoard.tx_busy = 1;
		USBD_API->hw->WriteEP(g_keyBoard.hUsb, HID_EP_IN, &g_keyBoard.report[0], KEYBOARD_REPORT_SIZE);
	}
}
| miragecentury/M2_SE_RTOS_Project | Project/LPC1549_Keil/examples/usbd_rom/usbd_rom_hid_keyboard/hid_keyboard.c | C | mit | 7,219 |
/*
* Copyright (c) 2010, ETH Zurich.
* All rights reserved.
*
* INTERFACE NAME: mem
 * INTERFACE FILE: ../if/mem.if
* INTERFACE DESCRIPTION: Memory allocation RPC interface
*
* This file is distributed under the terms in the attached LICENSE
* file. If you do not find this file, copies can be found by
* writing to:
* ETH Zurich D-INFK, Universitaetstr.6, CH-8092 Zurich.
* Attn: Systems Group.
*
* THIS FILE IS AUTOMATICALLY GENERATED BY FLOUNDER: DO NOT EDIT!
*/
#include <barrelfish/barrelfish.h>
#include <flounder/flounder_support.h>
#include <if/mem_defs.h>
/*
 * Export function
 */
/* Export the mem service so clients can bind to it.
 * Allocates a mem_export record (kept alive for the lifetime of the export),
 * fills in the generic IDC export fields, wires up the connect handler for
 * each backend that is compiled in, and registers the export with the IDC
 * system (which will eventually call export_cb with the new iref).
 * Returns LIB_ERR_MALLOC_FAIL on allocation failure, otherwise whatever
 * idc_export_service() returns. Generated code — do not edit. */
errval_t mem_export(void *st, idc_export_callback_fn *export_cb, mem_connect_fn *connect_cb, struct waitset *ws, idc_export_flags_t flags)
{
    struct mem_export *e = malloc(sizeof(struct mem_export ));
    if (e == NULL) {
        return(LIB_ERR_MALLOC_FAIL);
    }

    // fill in common parts of export struct
    e->connect_cb = connect_cb;
    e->waitset = ws;
    e->st = st;
    (e->common).export_callback = export_cb;
    (e->common).flags = flags;
    (e->common).connect_cb_st = e;
    (e->common).export_cb_st = st;

    // fill in connect handler for each enabled backend
#ifdef CONFIG_FLOUNDER_BACKEND_LMP
    (e->common).lmp_connect_callback = mem_lmp_connect_handler;
#endif // CONFIG_FLOUNDER_BACKEND_LMP
#ifdef CONFIG_FLOUNDER_BACKEND_UMP
    (e->common).ump_connect_callback = mem_ump_connect_handler;
#endif // CONFIG_FLOUNDER_BACKEND_UMP
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    (e->common).ump_connect_callback = mem_ump_ipi_connect_handler;
#endif // CONFIG_FLOUNDER_BACKEND_UMP_IPI
#ifdef CONFIG_FLOUNDER_BACKEND_MULTIHOP
    (e->common).multihop_connect_callback = mem_multihop_connect_handler;
#endif // CONFIG_FLOUNDER_BACKEND_MULTIHOP

    return(idc_export_service(&(e->common)));
}
/*
 * Generic bind function
 */
/* Bind continuation for "direct" binds: tries the compiled-in backends in
 * the order lmp, ump_ipi, ump, multihop. The function is re-entered as each
 * backend's bind attempt completes; b->driver_num records which attempt is
 * in flight. Each `case N` first evaluates the result of attempt N (falling
 * through on the very first call, when nothing has been tried yet), then
 * kicks off the next backend. Disabled backends fall through via the #else
 * branches. On a terminal outcome control jumps to `out:`, which invokes the
 * user's continuation and frees the attempt state. Generated code — the
 * case fallthroughs and goto labels are intentional; do not edit. */
static void mem_bind_continuation_direct(void *st, errval_t err, struct mem_binding *_binding)
{
    // This bind cont function uses the different backends in the following order:
    //    lmp ump_ipi ump multihop
    struct flounder_generic_bind_attempt *b = st;
    switch (b->driver_num) {
    case 0:
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_LMP
        // try next backend
        b->binding = malloc(sizeof(struct mem_lmp_binding ));
        assert((b->binding) != NULL);
        err = mem_lmp_bind(b->binding, b->iref, mem_bind_continuation_direct, b, b->waitset, b->flags, DEFAULT_LMP_BUF_WORDS);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_LMP

    case 1:
#ifdef CONFIG_FLOUNDER_BACKEND_LMP
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            // LMP only works on the same core; other failures are permanent
            if (err_no(err) == MON_ERR_IDC_BIND_NOT_SAME_CORE) {
                goto try_next_1;
            } else {
                // report permanent failure to user
                _binding = NULL;
                goto out;
            }
        }
        try_next_1:
#endif // CONFIG_FLOUNDER_BACKEND_LMP
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
        // try next backend
        b->binding = malloc(sizeof(struct mem_ump_ipi_binding ));
        assert((b->binding) != NULL);
        err = mem_ump_ipi_bind(b->binding, b->iref, mem_bind_continuation_direct, b, b->waitset, b->flags, DEFAULT_UMP_BUFLEN, DEFAULT_UMP_BUFLEN);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_UMP_IPI

    case 2:
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            if (true) {
                goto try_next_2;
            } else {
                // report permanent failure to user
                _binding = NULL;
                goto out;
            }
        }
        try_next_2:
#endif // CONFIG_FLOUNDER_BACKEND_UMP_IPI
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP
        // try next backend
        b->binding = malloc(sizeof(struct mem_ump_binding ));
        assert((b->binding) != NULL);
        err = mem_ump_bind(b->binding, b->iref, mem_bind_continuation_direct, b, b->waitset, b->flags, DEFAULT_UMP_BUFLEN, DEFAULT_UMP_BUFLEN);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_UMP

    case 3:
#ifdef CONFIG_FLOUNDER_BACKEND_UMP
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            if (true) {
                goto try_next_3;
            } else {
                // report permanent failure to user
                _binding = NULL;
                goto out;
            }
        }
        try_next_3:
#endif // CONFIG_FLOUNDER_BACKEND_UMP
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_MULTIHOP
        // try next backend
        b->binding = malloc(sizeof(struct mem_multihop_binding ));
        assert((b->binding) != NULL);
        err = mem_multihop_bind(b->binding, b->iref, mem_bind_continuation_direct, b, b->waitset, b->flags);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_MULTIHOP

    case 4:
#ifdef CONFIG_FLOUNDER_BACKEND_MULTIHOP
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            if (!true) {
                _binding = NULL;
                goto out;
            }
        }
#endif // CONFIG_FLOUNDER_BACKEND_MULTIHOP
        // all backends exhausted
        err = FLOUNDER_ERR_GENERIC_BIND_NO_MORE_DRIVERS;
        _binding = NULL;
        goto out;

    default:
        assert(!("invalid state"));
    }

out:
    // deliver the final result to the user's continuation and clean up
    ((mem_bind_continuation_fn *)(b->callback))(b->st, err, _binding);
    free(b);
}
/* Bind continuation for binds requested with IDC_BIND_FLAG_MULTIHOP: same
 * state machine as mem_bind_continuation_direct, but prefers the multihop
 * backend right after LMP (order: lmp, multihop, ump_ipi, ump). See the
 * direct variant for how the case fallthroughs and goto labels work.
 * (Note: the name's "contination" spelling is as generated; it is referenced
 * from mem_bind, so it is left unchanged.) Generated code — do not edit. */
static void mem_bind_contination_multihop(void *st, errval_t err, struct mem_binding *_binding)
{
    // This bind cont function uses the different backends in the following order:
    //    lmp multihop ump_ipi ump
    struct flounder_generic_bind_attempt *b = st;
    switch (b->driver_num) {
    case 0:
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_LMP
        // try next backend
        b->binding = malloc(sizeof(struct mem_lmp_binding ));
        assert((b->binding) != NULL);
        err = mem_lmp_bind(b->binding, b->iref, mem_bind_contination_multihop, b, b->waitset, b->flags, DEFAULT_LMP_BUF_WORDS);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_LMP

    case 1:
#ifdef CONFIG_FLOUNDER_BACKEND_LMP
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            // LMP only works on the same core; other failures are permanent
            if (err_no(err) == MON_ERR_IDC_BIND_NOT_SAME_CORE) {
                goto try_next_1;
            } else {
                // report permanent failure to user
                _binding = NULL;
                goto out;
            }
        }
        try_next_1:
#endif // CONFIG_FLOUNDER_BACKEND_LMP
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_MULTIHOP
        // try next backend
        b->binding = malloc(sizeof(struct mem_multihop_binding ));
        assert((b->binding) != NULL);
        err = mem_multihop_bind(b->binding, b->iref, mem_bind_contination_multihop, b, b->waitset, b->flags);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_MULTIHOP

    case 2:
#ifdef CONFIG_FLOUNDER_BACKEND_MULTIHOP
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            if (true) {
                goto try_next_2;
            } else {
                // report permanent failure to user
                _binding = NULL;
                goto out;
            }
        }
        try_next_2:
#endif // CONFIG_FLOUNDER_BACKEND_MULTIHOP
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
        // try next backend
        b->binding = malloc(sizeof(struct mem_ump_ipi_binding ));
        assert((b->binding) != NULL);
        err = mem_ump_ipi_bind(b->binding, b->iref, mem_bind_contination_multihop, b, b->waitset, b->flags, DEFAULT_UMP_BUFLEN, DEFAULT_UMP_BUFLEN);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_UMP_IPI

    case 3:
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            if (true) {
                goto try_next_3;
            } else {
                // report permanent failure to user
                _binding = NULL;
                goto out;
            }
        }
        try_next_3:
#endif // CONFIG_FLOUNDER_BACKEND_UMP_IPI
        (b->driver_num)++;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP
        // try next backend
        b->binding = malloc(sizeof(struct mem_ump_binding ));
        assert((b->binding) != NULL);
        err = mem_ump_bind(b->binding, b->iref, mem_bind_contination_multihop, b, b->waitset, b->flags, DEFAULT_UMP_BUFLEN, DEFAULT_UMP_BUFLEN);
        if (err_is_fail(err)) {
            free(b->binding);
            _binding = NULL;
            goto out;
        } else {
            return;
        }
#else
        // skip non-enabled backend (fall through)
#endif // CONFIG_FLOUNDER_BACKEND_UMP

    case 4:
#ifdef CONFIG_FLOUNDER_BACKEND_UMP
        if (err_is_ok(err)) {
            goto out;
        } else {
            free(b->binding);
            if (!true) {
                _binding = NULL;
                goto out;
            }
        }
#endif // CONFIG_FLOUNDER_BACKEND_UMP
        // all backends exhausted
        err = FLOUNDER_ERR_GENERIC_BIND_NO_MORE_DRIVERS;
        _binding = NULL;
        goto out;

    default:
        assert(!("invalid state"));
    }

out:
    // deliver the final result to the user's continuation and clean up
    ((mem_bind_continuation_fn *)(b->callback))(b->st, err, _binding);
    free(b);
}
/* Bind to an exported mem service at `iref`.
 * Allocates the generic bind-attempt state (freed by the continuation when
 * the bind completes) and kicks off the backend-selection state machine:
 * the multihop-preferring variant when IDC_BIND_FLAG_MULTIHOP is set,
 * otherwise the direct variant. The user's `_continuation` is invoked later
 * with the final binding (or NULL plus an error). Generated code. */
errval_t mem_bind(iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags)
{
    // allocate state
    struct flounder_generic_bind_attempt *b = malloc(sizeof(struct flounder_generic_bind_attempt ));
    if (b == NULL) {
        return(LIB_ERR_MALLOC_FAIL);
    }

    // fill in binding state
    b->iref = iref;
    b->waitset = waitset;
    b->driver_num = 0;
    b->callback = _continuation;
    b->st = st;
    b->flags = flags;

    // driver_num == 0 means "nothing attempted yet": the continuation's
    // case 0 starts the first backend's bind attempt.
    if (flags & IDC_BIND_FLAG_MULTIHOP) {
        mem_bind_contination_multihop(b, SYS_ERR_OK, NULL);
    } else {
        mem_bind_continuation_direct(b, SYS_ERR_OK, NULL);
    }

    return(SYS_ERR_OK);
}
/*
* Copyright (c) 2010, ETH Zurich.
* All rights reserved.
*
* INTERFACE NAME: mem
 * INTERFACE FILE: ../if/mem.if
* INTERFACE DESCRIPTION: Memory allocation RPC interface
*
* This file is distributed under the terms in the attached LICENSE
* file. If you do not find this file, copies can be found by
* writing to:
* ETH Zurich D-INFK, Universitaetstr.6, CH-8092 Zurich.
* Attn: Systems Group.
*
* THIS FILE IS AUTOMATICALLY GENERATED BY FLOUNDER: DO NOT EDIT!
*/
/*
* Generated Stub for LMP on x86_64
*/
#include <string.h>
#include <barrelfish/barrelfish.h>
#include <flounder/flounder_support.h>
#include <flounder/flounder_support_lmp.h>
#include <if/mem_defs.h>
/*
* Send handler functions
*/
/* LMP transmit handler for the single-fragment `allocate` call message.
 * Packs `bits` into the high half of the message-number word and sends
 * minbase/maxlimit as payload words. On success, clears tx state and wakes
 * waiters; on a transient channel error, re-registers itself to retry; on a
 * permanent error, reports to the binding's error handler. Generated code. */
static void mem_allocate_call__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send3(&(b->chan), b->flags, NULL_CAP, mem_allocate_call__msgnum | (((uintptr_t )(((_binding->tx_union).allocate_call).bits)) << 16), ((_binding->tx_union).allocate_call).minbase, ((_binding->tx_union).allocate_call).maxlimit);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_allocate_call__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the single-fragment `allocate` response:
 * sends the error code as payload and the allocated RAM capability as the
 * message cap. Retry/error handling as in the other send handlers. */
static void mem_allocate_response__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send2(&(b->chan), b->flags, ((_binding->tx_union).allocate_response).mem_cap, mem_allocate_response__msgnum, ((_binding->tx_union).allocate_response).ret);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_allocate_response__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the single-fragment `steal` call message.
 * Same wire layout as the allocate call: `bits` packed into the high half
 * of the message-number word, minbase/maxlimit as payload words. */
static void mem_steal_call__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send3(&(b->chan), b->flags, NULL_CAP, mem_steal_call__msgnum | (((uintptr_t )(((_binding->tx_union).steal_call).bits)) << 16), ((_binding->tx_union).steal_call).minbase, ((_binding->tx_union).steal_call).maxlimit);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_steal_call__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the single-fragment `steal` response:
 * error code as payload, stolen RAM capability as the message cap. */
static void mem_steal_response__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send2(&(b->chan), b->flags, ((_binding->tx_union).steal_response).mem_cap, mem_steal_response__msgnum, ((_binding->tx_union).steal_response).ret);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_steal_response__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the `available` call: a bare message number
 * with no payload words or capability. */
static void mem_available_call__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send1(&(b->chan), b->flags, NULL_CAP, mem_available_call__msgnum);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_available_call__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the `available` response: sends the available
 * and total memory figures as two payload words, no capability. */
static void mem_available_response__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send3(&(b->chan), b->flags, NULL_CAP, mem_available_response__msgnum, ((_binding->tx_union).available_response).mem_avail, ((_binding->tx_union).available_response).mem_total);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_available_response__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the `free_monitor` call: the cap being returned
 * travels as the message cap, `bits` is packed into the high half of the
 * message-number word, and `base` is the payload word. */
static void mem_free_monitor_call__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send2(&(b->chan), b->flags, ((_binding->tx_union).free_monitor_call).mem_cap, mem_free_monitor_call__msgnum | (((uintptr_t )(((_binding->tx_union).free_monitor_call).bits)) << 16), ((_binding->tx_union).free_monitor_call).base);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_free_monitor_call__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/* LMP transmit handler for the `free_monitor` response: sends only the
 * error code as payload, no capability. */
static void mem_free_monitor_response__lmp_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;

    // Switch on current outgoing message fragment
    switch (_binding->tx_msg_fragment) {
    case 0:
        err = lmp_chan_send2(&(b->chan), b->flags, NULL_CAP, mem_free_monitor_response__msgnum, ((_binding->tx_union).free_monitor_response).err);
        if (err_is_ok(err)) {
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
            return;
        } else {
            break;
        }

    default:
        assert(!("invalid fragment"));
        err = FLOUNDER_ERR_INVALID_STATE;
    }

    if (lmp_err_is_transient(err)) {
        // Construct retry closure and register it
        struct event_closure retry_closure = (struct event_closure){ .handler = mem_free_monitor_response__lmp_send_handler, .arg = arg };
        err = lmp_chan_register_send(&(b->chan), _binding->waitset, retry_closure);
        assert(err_is_ok(err));
    } else {
        // Report error to user
        (_binding->error_handler)(_binding, err);
        _binding->tx_msgnum = 0;
        flounder_support_trigger_chan(&(_binding->register_chanstate));
        flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
    }
}
/*
* Message sender functions
*/
/*
 * Send a mem.allocate_call message over LMP.
 * Returns FLOUNDER_ERR_TX_BUSY if another message is outstanding or the
 * previous send continuation has not fired yet. Otherwise stores the
 * arguments in the tx union, registers _continuation (if any) to fire on
 * send completion, and kicks off the fragment send handler.
 */
static errval_t mem_allocate_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_allocate_call__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).allocate_call).bits = bits;
    ((_binding->tx_union).allocate_call).minbase = minbase;
    ((_binding->tx_union).allocate_call).maxlimit = maxlimit;
    FL_DEBUG("lmp TX mem.allocate_call\n");
    // try to send!
    mem_allocate_call__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.allocate_response message (result code plus a RAM capability)
 * over LMP. Returns FLOUNDER_ERR_TX_BUSY if a message is outstanding or the
 * previous send continuation has not fired; otherwise stores the arguments
 * and starts transmission via the fragment send handler.
 */
static errval_t mem_allocate_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_allocate_response__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).allocate_response).ret = ret;
    ((_binding->tx_union).allocate_response).mem_cap = mem_cap;
    FL_DEBUG("lmp TX mem.allocate_response\n");
    // try to send!
    mem_allocate_response__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.steal_call message over LMP. Same busy/continuation protocol
 * as the other senders: FLOUNDER_ERR_TX_BUSY while a message or continuation
 * is outstanding, otherwise stage arguments and start the fragment handler.
 */
static errval_t mem_steal_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_steal_call__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).steal_call).bits = bits;
    ((_binding->tx_union).steal_call).minbase = minbase;
    ((_binding->tx_union).steal_call).maxlimit = maxlimit;
    FL_DEBUG("lmp TX mem.steal_call\n");
    // try to send!
    mem_steal_call__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.steal_response message (result code plus a RAM capability)
 * over LMP. FLOUNDER_ERR_TX_BUSY while a message or continuation is
 * outstanding; otherwise stage arguments and start the fragment handler.
 */
static errval_t mem_steal_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_steal_response__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).steal_response).ret = ret;
    ((_binding->tx_union).steal_response).mem_cap = mem_cap;
    FL_DEBUG("lmp TX mem.steal_response\n");
    // try to send!
    mem_steal_response__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.available_call message over LMP (no payload arguments).
 * FLOUNDER_ERR_TX_BUSY while a message or continuation is outstanding;
 * otherwise record the message number and start the fragment handler.
 */
static errval_t mem_available_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_available_call__msgnum;
    _binding->tx_msg_fragment = 0;
    FL_DEBUG("lmp TX mem.available_call\n");
    // try to send!
    mem_available_call__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.available_response message (available/total memory figures)
 * over LMP. FLOUNDER_ERR_TX_BUSY while a message or continuation is
 * outstanding; otherwise stage arguments and start the fragment handler.
 */
static errval_t mem_available_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_genpaddr_t mem_avail, mem_genpaddr_t mem_total)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_available_response__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).available_response).mem_avail = mem_avail;
    ((_binding->tx_union).available_response).mem_total = mem_total;
    FL_DEBUG("lmp TX mem.available_response\n");
    // try to send!
    mem_available_response__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.free_monitor_call message (capability plus base/bits) over
 * LMP. FLOUNDER_ERR_TX_BUSY while a message or continuation is
 * outstanding; otherwise stage arguments and start the fragment handler.
 */
static errval_t mem_free_monitor_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, struct capref mem_cap, mem_genpaddr_t base, uint8_t bits)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_free_monitor_call__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).free_monitor_call).mem_cap = mem_cap;
    ((_binding->tx_union).free_monitor_call).base = base;
    ((_binding->tx_union).free_monitor_call).bits = bits;
    FL_DEBUG("lmp TX mem.free_monitor_call\n");
    // try to send!
    mem_free_monitor_call__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send a mem.free_monitor_response message (result code only) over LMP.
 * FLOUNDER_ERR_TX_BUSY while a message or continuation is outstanding;
 * otherwise stage the argument and start the fragment handler.
 */
static errval_t mem_free_monitor_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t err)
{
    // check that we can accept an outgoing message
    if ((_binding->tx_msgnum) != 0) {
        return(FLOUNDER_ERR_TX_BUSY);
    }
    // register send continuation
    if ((_continuation.handler) != NULL) {
        errval_t _err;
        _err = flounder_support_register(_binding->waitset, &(_binding->tx_cont_chanstate), _continuation, false);
        // may fail if previous continuation hasn't fired yet
        if (err_is_fail(_err)) {
            if (err_no(_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return(FLOUNDER_ERR_TX_BUSY);
            } else {
                assert(!("shouldn't happen"));
                return(_err);
            }
        }
    }
    // store message number and arguments
    _binding->tx_msgnum = mem_free_monitor_response__msgnum;
    _binding->tx_msg_fragment = 0;
    ((_binding->tx_union).free_monitor_response).err = err;
    FL_DEBUG("lmp TX mem.free_monitor_response\n");
    // try to send!
    mem_free_monitor_response__lmp_send_handler(_binding);
    return(SYS_ERR_OK);
}
/*
 * Send vtable: one LMP marshalling entry point per interface message.
 * Installed on the binding by mem_lmp_init().
 */
static struct mem_tx_vtbl mem_lmp_tx_vtbl = {
    .allocate_call = mem_allocate_call__lmp_send,
    .allocate_response = mem_allocate_response__lmp_send,
    .steal_call = mem_steal_call__lmp_send,
    .steal_response = mem_steal_response__lmp_send,
    .available_call = mem_available_call__lmp_send,
    .available_response = mem_available_response__lmp_send,
    .free_monitor_call = mem_free_monitor_call__lmp_send,
    .free_monitor_response = mem_free_monitor_response__lmp_send,
};
/*
* Receive handler
*/
/*
 * LMP receive handler: drains all pending messages from the channel in a
 * loop, demultiplexes on the 16-bit message number in the low bits of
 * word 0, unmarshals the arguments into the rx union, invokes the user's
 * receive vtable entry, and finally re-registers itself for the next
 * receive notification.
 */
void mem_lmp_rx_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_lmp_binding *b = arg;
    errval_t err;
    struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
    struct capref cap;
    struct event_closure recv_closure = (struct event_closure){ .handler = mem_lmp_rx_handler, .arg = arg };
    do {
        // try to retrieve a message from the channel
        err = lmp_chan_recv(&(b->chan), &msg, &cap);
        // check if we succeeded
        if (err_is_fail(err)) {
            if (err_no(err) == LIB_ERR_NO_LMP_MSG) {
                // no message
                break;
            } else {
                // real error
                (_binding->error_handler)(_binding, err_push(err, LIB_ERR_LMP_CHAN_RECV));
                return;
            }
        }
        // allocate a new receive slot if needed
        if (!capref_is_null(cap)) {
            err = lmp_chan_alloc_recv_slot(&(b->chan));
            if (err_is_fail(err)) {
                // NOTE(review): failure is reported but processing continues;
                // presumably the (default, aborting) error handler makes this moot — confirm
                (_binding->error_handler)(_binding, err_push(err, LIB_ERR_LMP_ALLOC_RECV_SLOT));
            }
        }
        // is this the start of a new message?
        if ((_binding->rx_msgnum) == 0) {
            // check message length
            if (((msg.buf).msglen) == 0) {
                (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_EMPTY_MSG);
                break;
            }
            // unmarshall message number from first word, set fragment to 0
            _binding->rx_msgnum = (((msg.words)[0]) & 0xffff);
            _binding->rx_msg_fragment = 0;
        }
        // switch on message number and fragment number
        switch (_binding->rx_msgnum) {
            case mem_allocate_call__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        // 'bits' is packed into bits 16..23 of word 0, alongside the msgnum
                        ((_binding->rx_union).allocate_call).bits = ((((msg.words)[0]) >> 16) & 0xff);
                        ((_binding->rx_union).allocate_call).minbase = ((msg.words)[1]);
                        ((_binding->rx_union).allocate_call).maxlimit = ((msg.words)[2]);
                        FL_DEBUG("lmp RX mem.allocate_call\n");
                        assert(((_binding->rx_vtbl).allocate_call) != NULL);
                        ((_binding->rx_vtbl).allocate_call)(_binding, ((_binding->rx_union).allocate_call).bits, ((_binding->rx_union).allocate_call).minbase, ((_binding->rx_union).allocate_call).maxlimit);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_allocate_response__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        ((_binding->rx_union).allocate_response).ret = ((msg.words)[1]);
                        ((_binding->rx_union).allocate_response).mem_cap = cap;
                        FL_DEBUG("lmp RX mem.allocate_response\n");
                        assert(((_binding->rx_vtbl).allocate_response) != NULL);
                        ((_binding->rx_vtbl).allocate_response)(_binding, ((_binding->rx_union).allocate_response).ret, ((_binding->rx_union).allocate_response).mem_cap);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_steal_call__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        ((_binding->rx_union).steal_call).bits = ((((msg.words)[0]) >> 16) & 0xff);
                        ((_binding->rx_union).steal_call).minbase = ((msg.words)[1]);
                        ((_binding->rx_union).steal_call).maxlimit = ((msg.words)[2]);
                        FL_DEBUG("lmp RX mem.steal_call\n");
                        assert(((_binding->rx_vtbl).steal_call) != NULL);
                        ((_binding->rx_vtbl).steal_call)(_binding, ((_binding->rx_union).steal_call).bits, ((_binding->rx_union).steal_call).minbase, ((_binding->rx_union).steal_call).maxlimit);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_steal_response__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        ((_binding->rx_union).steal_response).ret = ((msg.words)[1]);
                        ((_binding->rx_union).steal_response).mem_cap = cap;
                        FL_DEBUG("lmp RX mem.steal_response\n");
                        assert(((_binding->rx_vtbl).steal_response) != NULL);
                        ((_binding->rx_vtbl).steal_response)(_binding, ((_binding->rx_union).steal_response).ret, ((_binding->rx_union).steal_response).mem_cap);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_available_call__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        FL_DEBUG("lmp RX mem.available_call\n");
                        assert(((_binding->rx_vtbl).available_call) != NULL);
                        ((_binding->rx_vtbl).available_call)(_binding);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_available_response__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        ((_binding->rx_union).available_response).mem_avail = ((msg.words)[1]);
                        ((_binding->rx_union).available_response).mem_total = ((msg.words)[2]);
                        FL_DEBUG("lmp RX mem.available_response\n");
                        assert(((_binding->rx_vtbl).available_response) != NULL);
                        ((_binding->rx_vtbl).available_response)(_binding, ((_binding->rx_union).available_response).mem_avail, ((_binding->rx_union).available_response).mem_total);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_free_monitor_call__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        ((_binding->rx_union).free_monitor_call).bits = ((((msg.words)[0]) >> 16) & 0xff);
                        ((_binding->rx_union).free_monitor_call).base = ((msg.words)[1]);
                        ((_binding->rx_union).free_monitor_call).mem_cap = cap;
                        FL_DEBUG("lmp RX mem.free_monitor_call\n");
                        assert(((_binding->rx_vtbl).free_monitor_call) != NULL);
                        ((_binding->rx_vtbl).free_monitor_call)(_binding, ((_binding->rx_union).free_monitor_call).mem_cap, ((_binding->rx_union).free_monitor_call).base, ((_binding->rx_union).free_monitor_call).bits);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            case mem_free_monitor_response__msgnum:
                switch (_binding->rx_msg_fragment) {
                    case 0:
                        // check length
                        if (((msg.buf).msglen) > 4) {
                            (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
                            goto out;
                        }
                        ((_binding->rx_union).free_monitor_response).err = ((msg.words)[1]);
                        FL_DEBUG("lmp RX mem.free_monitor_response\n");
                        assert(((_binding->rx_vtbl).free_monitor_response) != NULL);
                        ((_binding->rx_vtbl).free_monitor_response)(_binding, ((_binding->rx_union).free_monitor_response).err);
                        _binding->rx_msgnum = 0;
                        break;
                    default:
                        (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
                        goto out;
                }
                break;
            default:
                (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_MSGNUM);
                goto out;
        }
    } while (err_is_ok(err));
out:
    // re-register for another receive notification
    err = lmp_chan_register_recv(&(b->chan), _binding->waitset, recv_closure);
    assert(err_is_ok(err));
}
/*
* Control functions
*/
/* A new message can be sent iff no outgoing message is currently in flight. */
static bool mem_lmp_can_send(struct mem_binding *b)
{
    return b->tx_msgnum == 0;
}
static errval_t mem_lmp_register_send(struct mem_binding *b, struct waitset *ws, struct event_closure _continuation)
{
return(flounder_support_register(ws, &(b->register_chanstate), _continuation, mem_lmp_can_send(b)));
}
/*
 * Default asynchronous error handler, installed by mem_lmp_init():
 * logs the error and aborts the domain. Applications replace the
 * binding's error_handler field to override this behavior.
 */
static void mem_lmp_default_error_handler(struct mem_binding *b, errval_t err)
{
    DEBUG_ERR(err, "asynchronous error in Flounder-generated mem lmp binding (default handler)");
    abort();
}
/*
 * Move all of the binding's event registrations (register/TX-continuation
 * channel states plus the LMP channel's send/receive notifications) to a
 * new waitset, and record it as the binding's waitset.
 */
static errval_t mem_lmp_change_waitset(struct mem_binding *_binding, struct waitset *ws)
{
    struct mem_lmp_binding *b = (void *)(_binding);
    // Migrate register and TX continuation notifications
    flounder_support_migrate_notify(&(_binding->register_chanstate), ws);
    flounder_support_migrate_notify(&(_binding->tx_cont_chanstate), ws);
    // change waitset on binding
    _binding->waitset = ws;
    // Migrate send and receive notifications
    lmp_chan_migrate_recv(&(b->chan), ws);
    lmp_chan_migrate_send(&(b->chan), ws);
    return(SYS_ERR_OK);
}
/* Apply a generic IDC control operation by folding it into the LMP send flags. */
static errval_t mem_lmp_control(struct mem_binding *_binding, idc_control_t control)
{
    struct mem_lmp_binding *lmpb = (void *)_binding;
    lmpb->flags = idc_control_to_lmp_flags(control, lmpb->flags);
    return SYS_ERR_OK;
}
/*
* Functions to initialise/destroy the binding state
*/
/*
 * Initialise an LMP binding: install the control functions, send vtable
 * and default error handler, zero the receive vtable and all transmit/
 * receive protocol state, initialise the channel states and the underlying
 * LMP channel, and set default send flags.
 */
void mem_lmp_init(struct mem_lmp_binding *b, struct waitset *waitset)
{
    (b->b).st = NULL;
    (b->b).waitset = waitset;
    event_mutex_init(&((b->b).mutex), waitset);
    (b->b).can_send = mem_lmp_can_send;
    (b->b).register_send = mem_lmp_register_send;
    (b->b).error_handler = mem_lmp_default_error_handler;
    (b->b).tx_vtbl = mem_lmp_tx_vtbl;
    // user must fill in rx_vtbl before messages arrive
    memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
    flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
    flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
    (b->b).tx_msgnum = 0;
    (b->b).rx_msgnum = 0;
    (b->b).tx_msg_fragment = 0;
    (b->b).rx_msg_fragment = 0;
    (b->b).tx_str_pos = 0;
    (b->b).rx_str_pos = 0;
    (b->b).tx_str_len = 0;
    (b->b).rx_str_len = 0;
    (b->b).bind_cont = NULL;
    lmp_chan_init(&(b->chan));
    (b->b).change_waitset = mem_lmp_change_waitset;
    (b->b).control = mem_lmp_control;
    b->flags = LMP_SEND_FLAGS_DEFAULT;
}
/*
 * Tear down binding state: destroys both waitset channel states and the
 * underlying LMP channel. Does not free the binding structure itself.
 */
void mem_lmp_destroy(struct mem_lmp_binding *b)
{
    flounder_support_waitset_chanstate_destroy(&((b->b).register_chanstate));
    flounder_support_waitset_chanstate_destroy(&((b->b).tx_cont_chanstate));
    lmp_chan_destroy(&(b->chan));
}
/*
* Bind function
*/
/*
 * Continuation invoked when the LMP channel bind completes.
 * On success, allocates a cap receive slot and registers the receive
 * handler; any failure (including a failed bind) destroys the binding.
 * In all cases the user's bind continuation is called with the outcome.
 */
static void mem_lmp_bind_continuation(void *st, errval_t err, struct lmp_chan *chan)
{
    struct mem_lmp_binding *b = st;
    if (err_is_ok(err)) {
        // allocate a cap receive slot
        err = lmp_chan_alloc_recv_slot(chan);
        if (err_is_fail(err)) {
            err = err_push(err, LIB_ERR_LMP_ALLOC_RECV_SLOT);
            goto fail;
        }
        // register for receive
        err = lmp_chan_register_recv(chan, (b->b).waitset, (struct event_closure){ .handler = mem_lmp_rx_handler, .arg = b });
        if (err_is_fail(err)) {
            err = err_push(err, LIB_ERR_CHAN_REGISTER_RECV);
            goto fail;
        }
    } else {
        // note: 'fail' lives inside the else branch so the setup failures
        // above jump here to share the bind-failure cleanup path
fail:
        mem_lmp_destroy(b);
    }
    ((b->b).bind_cont)((b->b).st, err, &(b->b));
}
/*
 * Bind to an exported mem service over LMP.
 * Initialises the binding, records the user's continuation, and starts an
 * asynchronous channel bind; mem_lmp_bind_continuation() completes the
 * setup. On immediate failure the binding state is destroyed and the
 * error returned synchronously.
 */
errval_t mem_lmp_bind(struct mem_lmp_binding *b, iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags, size_t lmp_buflen)
{
    errval_t err;
    mem_lmp_init(b, waitset);
    (b->b).st = st;
    (b->b).bind_cont = _continuation;
    err = lmp_chan_bind(&(b->chan), (struct lmp_bind_continuation){ .handler = mem_lmp_bind_continuation, .st = b }, &((b->b).event_qnode), iref, lmp_buflen);
    if (err_is_fail(err)) {
        mem_lmp_destroy(b);
    }
    return(err);
}
/*
* Connect callback for export
*/
/*
 * Connect callback for an LMP export: invoked when a client attempts to
 * bind. Allocates and initialises a new binding, runs the user's connect
 * callback, and on acceptance sets up the channel and registers for
 * receive events. On success, *retchan is set to the new channel.
 */
errval_t mem_lmp_connect_handler(void *st, size_t buflen_words, struct capref endpoint, struct lmp_chan **retchan)
{
    struct mem_export *e = st;
    errval_t err;
    // allocate storage for binding
    struct mem_lmp_binding *b = malloc(sizeof(struct mem_lmp_binding ));
    if (b == NULL) {
        return(LIB_ERR_MALLOC_FAIL);
    }
    struct mem_binding *_binding = &(b->b);
    mem_lmp_init(b, e->waitset);
    // run user's connect handler
    err = ((e->connect_cb)(e->st, _binding));
    if (err_is_fail(err)) {
        // connection refused: tear down and release the binding.
        // fix: the malloc'd binding was previously leaked on this path,
        // since mem_lmp_destroy() does not free the structure itself.
        mem_lmp_destroy(b);
        free(b);
        return(err);
    }
    // accept the connection and setup the channel
    // FIXME: user policy needed to decide on the size of the message buffer?
    err = lmp_chan_accept(&(b->chan), buflen_words, endpoint);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_LMP_CHAN_ACCEPT);
        // NOTE(review): from here on the user already holds a reference to
        // the binding (connect_cb succeeded), so it is reported via the
        // error handler rather than freed.
        (_binding->error_handler)(_binding, err);
        return(err);
    }
    // allocate a cap receive slot
    err = lmp_chan_alloc_recv_slot(&(b->chan));
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_LMP_ALLOC_RECV_SLOT);
        (_binding->error_handler)(_binding, err);
        return(err);
    }
    // register for receive
    err = lmp_chan_register_recv(&(b->chan), _binding->waitset, (struct event_closure){ .handler = mem_lmp_rx_handler, .arg = b });
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_CHAN_REGISTER_RECV);
        (_binding->error_handler)(_binding, err);
        return(err);
    }
    *retchan = (&(b->chan));
    return(SYS_ERR_OK);
}
/*
* Copyright (c) 2010, ETH Zurich.
* All rights reserved.
*
* INTERFACE NAME: mem
 * INTERFACE FILE: ../if/mem.if
* INTERFACE DESCRIPTION: Memory allocation RPC interface
*
* This file is distributed under the terms in the attached LICENSE
* file. If you do not find this file, copies can be found by
* writing to:
* ETH Zurich D-INFK, Universitaetstr.6, CH-8092 Zurich.
* Attn: Systems Group.
*
* THIS FILE IS AUTOMATICALLY GENERATED BY FLOUNDER: DO NOT EDIT!
*/
#ifdef CONFIG_FLOUNDER_BACKEND_UMP
/*
* Generated Stub for UMP
*/
#include <barrelfish/barrelfish.h>
#include <barrelfish/monitor_client.h>
#include <flounder/flounder_support.h>
#include <flounder/flounder_support_ump.h>
#include <if/mem_defs.h>
/*
* Send handler function
*/
/*
 * UMP transmit handler: sends any pending cap ack, then dispatches on the
 * current outgoing message number/fragment, writing the next fragment into
 * the shared UMP ring when space is available. Messages carrying
 * capabilities (allocate/steal responses, free_monitor_call) only complete
 * once the out-of-band cap transfer has finished (tx_capnum check);
 * cap-free messages complete as soon as their fragment is written.
 */
static void mem_ump_send_handler(void *arg)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_ump_binding *b = arg;
    errval_t err;
    err = SYS_ERR_OK;
    volatile struct ump_message *msg;
    struct ump_control ctrl;
    bool tx_notify = false;
    // do we need to (and can we) send a cap ack?
    if ((((b->ump_state).capst).tx_cap_ack) && flounder_stub_ump_can_send(&(b->ump_state))) {
        flounder_stub_ump_send_cap_ack(&(b->ump_state));
        ((b->ump_state).capst).tx_cap_ack = false;
        tx_notify = true;
    }
    // Switch on current outgoing message number
    switch (_binding->tx_msgnum) {
        case 0:
            break;
        case mem_allocate_call__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_allocate_call__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).allocate_call).bits);
                    (msg->data)[1] = (((_binding->tx_union).allocate_call).minbase);
                    (msg->data)[2] = (((_binding->tx_union).allocate_call).maxlimit);
                    // barrier ensures payload is visible before the control word
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    _binding->tx_msgnum = 0;
                    flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                    flounder_support_trigger_chan(&(_binding->register_chanstate));
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 0);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_allocate_response__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_allocate_response__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).allocate_response).ret);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    // message carries a cap: only complete once the cap transfer is done
                    if ((((b->ump_state).capst).tx_capnum) == 2) {
                        _binding->tx_msgnum = 0;
                        flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                        flounder_support_trigger_chan(&(_binding->register_chanstate));
                    }
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 1);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_steal_call__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_steal_call__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).steal_call).bits);
                    (msg->data)[1] = (((_binding->tx_union).steal_call).minbase);
                    (msg->data)[2] = (((_binding->tx_union).steal_call).maxlimit);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    _binding->tx_msgnum = 0;
                    flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                    flounder_support_trigger_chan(&(_binding->register_chanstate));
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 0);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_steal_response__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_steal_response__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).steal_response).ret);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    if ((((b->ump_state).capst).tx_capnum) == 2) {
                        _binding->tx_msgnum = 0;
                        flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                        flounder_support_trigger_chan(&(_binding->register_chanstate));
                    }
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 1);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_available_call__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_available_call__msgnum);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    _binding->tx_msgnum = 0;
                    flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                    flounder_support_trigger_chan(&(_binding->register_chanstate));
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 0);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_available_response__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_available_response__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).available_response).mem_avail);
                    (msg->data)[1] = (((_binding->tx_union).available_response).mem_total);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    _binding->tx_msgnum = 0;
                    flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                    flounder_support_trigger_chan(&(_binding->register_chanstate));
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 0);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_free_monitor_call__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_free_monitor_call__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).free_monitor_call).bits);
                    (msg->data)[1] = (((_binding->tx_union).free_monitor_call).base);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    if ((((b->ump_state).capst).tx_capnum) == 2) {
                        _binding->tx_msgnum = 0;
                        flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                        flounder_support_trigger_chan(&(_binding->register_chanstate));
                    }
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 1);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        case mem_free_monitor_response__msgnum:
            // Switch on current outgoing message fragment
            switch (_binding->tx_msg_fragment) {
                case 0:
                    // check if we can send another message
                    if (!flounder_stub_ump_can_send(&(b->ump_state))) {
                        tx_notify = true;
                        break;
                    }
                    // send the next fragment
                    msg = ump_chan_get_next(&((b->ump_state).chan), &ctrl);
                    flounder_stub_ump_control_fill(&(b->ump_state), &ctrl, mem_free_monitor_response__msgnum);
                    (msg->data)[0] = (((_binding->tx_union).free_monitor_response).err);
                    flounder_stub_ump_barrier();
                    (msg->header).control = ctrl;
                    (_binding->tx_msg_fragment)++;
                    _binding->tx_msgnum = 0;
                    flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
                    flounder_support_trigger_chan(&(_binding->register_chanstate));
                    return;
                case 1:
                    // we've sent all the fragments, we must just be waiting for caps
                    assert((((b->ump_state).capst).tx_capnum) <= 0);
                    break;
                default:
                    assert(!("invalid fragment"));
                    (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
            }
            break;
        default:
            assert(!("invalid msgnum"));
            (_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
    }
    // Send a notification if necessary
    if (tx_notify) {
        // NOTE(review): intentionally empty — presumably this stub variant is
        // generated without an out-of-band tx notification mechanism; confirm
    }
}
/*
* Capability sender function
*/
static void mem_ump_cap_send_handler(void *arg)
{
// Outgoing-capability state machine. Preconditions (asserted below): the
// peer has ACKed our cap announcement AND we hold the monitor mutex.
// Re-entered via flounder_stub_send_cap's continuation for each cap sent.
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
assert(((b->ump_state).capst).rx_cap_ack);
assert(((b->ump_state).capst).monitor_mutex_held);
// Switch on current outgoing message
switch (_binding->tx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current outgoing cap
switch (((b->ump_state).capst).tx_capnum) {
case 0:
// hand the cap to the monitor for transfer; on failure abort the
// whole message: report, reset tx state, wake senders waiting to send
err = flounder_stub_send_cap(&((b->ump_state).capst), ((b->ump_state).chan).monitor_binding, ((b->ump_state).chan).monitor_id, ((_binding->tx_union).allocate_response).mem_cap, true, mem_ump_cap_send_handler);
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
break;
case 1:
// all caps for this message sent: release the monitor mutex; if the
// data fragments are also done, the message is complete
flounder_support_monitor_mutex_unlock(((b->ump_state).chan).monitor_binding);
if ((_binding->tx_msg_fragment) == 1) {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_steal_response__msgnum:
// Switch on current outgoing cap (same two-step pattern as above)
switch (((b->ump_state).capst).tx_capnum) {
case 0:
err = flounder_stub_send_cap(&((b->ump_state).capst), ((b->ump_state).chan).monitor_binding, ((b->ump_state).chan).monitor_id, ((_binding->tx_union).steal_response).mem_cap, true, mem_ump_cap_send_handler);
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
break;
case 1:
flounder_support_monitor_mutex_unlock(((b->ump_state).chan).monitor_binding);
if ((_binding->tx_msg_fragment) == 1) {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_free_monitor_call__msgnum:
// Switch on current outgoing cap (same two-step pattern as above)
switch (((b->ump_state).capst).tx_capnum) {
case 0:
err = flounder_stub_send_cap(&((b->ump_state).capst), ((b->ump_state).chan).monitor_binding, ((b->ump_state).chan).monitor_id, ((_binding->tx_union).free_monitor_call).mem_cap, true, mem_ump_cap_send_handler);
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
break;
case 1:
flounder_support_monitor_mutex_unlock(((b->ump_state).chan).monitor_binding);
if ((_binding->tx_msg_fragment) == 1) {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
default:
assert(!("invalid message number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
}
/*
* Receive handler
*/
void mem_ump_rx_handler(void *arg)
{
// Drains every UMP message currently pending on the channel, dispatching
// complete messages to the user's rx_vtbl, then re-registers for receive
// notification (unless waiting for a cap, see out_no_reregister).
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
volatile struct ump_message *msg;
int msgnum;
while (true) {
// try to retrieve a message from the channel
err = ump_chan_recv(&((b->ump_state).chan), &msg);
// check if we succeeded
if (err_is_fail(err)) {
if (err_no(err) == LIB_ERR_NO_UMP_MSG) {
// no message
break;
} else {
// real error
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_UMP_CHAN_RECV));
return;
}
}
// process control word
msgnum = flounder_stub_ump_control_process(&(b->ump_state), (msg->header).control);
// is this a dummy message (ACK)?
if (msgnum == FL_UMP_ACK) {
goto loopnext;
}
// is this a cap ack for a pending tx message
if (msgnum == FL_UMP_CAP_ACK) {
assert(!(((b->ump_state).capst).rx_cap_ack));
((b->ump_state).capst).rx_cap_ack = true;
// cap sending can only start once we also hold the monitor mutex
if (((b->ump_state).capst).monitor_mutex_held) {
mem_ump_cap_send_handler(b);
}
goto loopnext;
}
// is this the start of a new message?
if ((_binding->rx_msgnum) == 0) {
_binding->rx_msgnum = msgnum;
_binding->rx_msg_fragment = 0;
}
// switch on message number and fragment number
switch (_binding->rx_msgnum) {
case mem_allocate_call__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
((_binding->rx_union).allocate_call).bits = (((msg->data)[0]) & 0xff);
((_binding->rx_union).allocate_call).minbase = ((msg->data)[1]);
((_binding->rx_union).allocate_call).maxlimit = ((msg->data)[2]);
FL_DEBUG("ump RX mem.allocate_call\n");
assert(((_binding->rx_vtbl).allocate_call) != NULL);
((_binding->rx_vtbl).allocate_call)(_binding, ((_binding->rx_union).allocate_call).bits, ((_binding->rx_union).allocate_call).minbase, ((_binding->rx_union).allocate_call).maxlimit);
_binding->rx_msgnum = 0;
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_allocate_response__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
// first fragment of a cap-bearing message: schedule a cap-ACK to the
// sender and reset our incoming cap counter; delivery happens here only
// if the cap already arrived (via mem_ump_cap_rx_handler), else there
((b->ump_state).capst).tx_cap_ack = true;
((b->ump_state).capst).rx_capnum = 0;
((_binding->rx_union).allocate_response).ret = ((msg->data)[0]);
(_binding->rx_msg_fragment)++;
if ((((b->ump_state).capst).rx_capnum) == 1) {
FL_DEBUG("ump RX mem.allocate_response\n");
assert(((_binding->rx_vtbl).allocate_response) != NULL);
((_binding->rx_vtbl).allocate_response)(_binding, ((_binding->rx_union).allocate_response).ret, ((_binding->rx_union).allocate_response).mem_cap);
_binding->rx_msgnum = 0;
} else {
// don't process anything else until we're done
goto out_no_reregister;
}
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_steal_call__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
((_binding->rx_union).steal_call).bits = (((msg->data)[0]) & 0xff);
((_binding->rx_union).steal_call).minbase = ((msg->data)[1]);
((_binding->rx_union).steal_call).maxlimit = ((msg->data)[2]);
FL_DEBUG("ump RX mem.steal_call\n");
assert(((_binding->rx_vtbl).steal_call) != NULL);
((_binding->rx_vtbl).steal_call)(_binding, ((_binding->rx_union).steal_call).bits, ((_binding->rx_union).steal_call).minbase, ((_binding->rx_union).steal_call).maxlimit);
_binding->rx_msgnum = 0;
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_steal_response__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
// cap-bearing message: same ACK/counter handling as allocate_response
((b->ump_state).capst).tx_cap_ack = true;
((b->ump_state).capst).rx_capnum = 0;
((_binding->rx_union).steal_response).ret = ((msg->data)[0]);
(_binding->rx_msg_fragment)++;
if ((((b->ump_state).capst).rx_capnum) == 1) {
FL_DEBUG("ump RX mem.steal_response\n");
assert(((_binding->rx_vtbl).steal_response) != NULL);
((_binding->rx_vtbl).steal_response)(_binding, ((_binding->rx_union).steal_response).ret, ((_binding->rx_union).steal_response).mem_cap);
_binding->rx_msgnum = 0;
} else {
// don't process anything else until we're done
goto out_no_reregister;
}
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_available_call__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
FL_DEBUG("ump RX mem.available_call\n");
assert(((_binding->rx_vtbl).available_call) != NULL);
((_binding->rx_vtbl).available_call)(_binding);
_binding->rx_msgnum = 0;
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_available_response__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
((_binding->rx_union).available_response).mem_avail = ((msg->data)[0]);
((_binding->rx_union).available_response).mem_total = ((msg->data)[1]);
FL_DEBUG("ump RX mem.available_response\n");
assert(((_binding->rx_vtbl).available_response) != NULL);
((_binding->rx_vtbl).available_response)(_binding, ((_binding->rx_union).available_response).mem_avail, ((_binding->rx_union).available_response).mem_total);
_binding->rx_msgnum = 0;
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_free_monitor_call__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
// cap-bearing message: same ACK/counter handling as allocate_response
((b->ump_state).capst).tx_cap_ack = true;
((b->ump_state).capst).rx_capnum = 0;
((_binding->rx_union).free_monitor_call).bits = (((msg->data)[0]) & 0xff);
((_binding->rx_union).free_monitor_call).base = ((msg->data)[1]);
(_binding->rx_msg_fragment)++;
if ((((b->ump_state).capst).rx_capnum) == 1) {
FL_DEBUG("ump RX mem.free_monitor_call\n");
assert(((_binding->rx_vtbl).free_monitor_call) != NULL);
((_binding->rx_vtbl).free_monitor_call)(_binding, ((_binding->rx_union).free_monitor_call).mem_cap, ((_binding->rx_union).free_monitor_call).base, ((_binding->rx_union).free_monitor_call).bits);
_binding->rx_msgnum = 0;
} else {
// don't process anything else until we're done
goto out_no_reregister;
}
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
case mem_free_monitor_response__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
((_binding->rx_union).free_monitor_response).err = ((msg->data)[0]);
FL_DEBUG("ump RX mem.free_monitor_response\n");
assert(((_binding->rx_vtbl).free_monitor_response) != NULL);
((_binding->rx_vtbl).free_monitor_response)(_binding, ((_binding->rx_union).free_monitor_response).err);
_binding->rx_msgnum = 0;
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_MSGNUM);
goto out;
}
loopnext:
// send an ack if the channel is now full
if (flounder_stub_ump_needs_ack(&(b->ump_state))) {
// run our send process if we need to
if ((((b->ump_state).capst).tx_cap_ack) || ((_binding->tx_msgnum) != 0)) {
mem_ump_send_handler(b);
} else {
flounder_stub_ump_send_ack(&(b->ump_state));
}
}
}
out:
// channel drained (or protocol error): re-arm receive notification
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
out_no_reregister:
// NOTE(review): an empty statement carrying an attribute so the label has
// a statement to attach to; presumably also quiets unused warnings — TODO confirm
__attribute__((unused));
// run our send process, if we need to
if ((((b->ump_state).capst).tx_cap_ack) || ((_binding->tx_msgnum) != 0)) {
mem_ump_send_handler(b);
} else {
// otherwise send a forced ack if the channel is now full
if (flounder_stub_ump_needs_ack(&(b->ump_state))) {
flounder_stub_ump_send_ack(&(b->ump_state));
}
}
}
/*
* Cap send/receive handlers
*/
/*
 * Capability receive handler: the monitor delivers each incoming cap for
 * the message currently being assembled (capid must equal our rx_capnum).
 * Stores the cap in the matching rx_union slot; if the data fragments have
 * already arrived (rx_msg_fragment == 1) the now-complete message is
 * delivered to the user and the channel is re-registered for receive.
 */
static void mem_ump_cap_rx_handler(void *arg, errval_t success, struct capref cap, uint32_t capid)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
assert(capid == (((b->ump_state).capst).rx_capnum));
// Check if there's an associated error
// FIXME: how should we report this to the user? at present we just deliver a NULL capref
if (err_is_fail(success)) {
// FIX: report the actual transfer error ('success'); the previous code
// passed local 'err', which is always SYS_ERR_OK at this point, so the
// debug output never showed the real failure.
DEBUG_ERR(success, "error in cap transfer");
}
// Switch on current incoming message
switch (_binding->rx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current incoming cap
switch ((((b->ump_state).capst).rx_capnum)++) {
case 0:
((_binding->rx_union).allocate_response).mem_cap = cap;
// deliver only if the data fragment has already been received
if ((_binding->rx_msg_fragment) == 1) {
FL_DEBUG("ump RX mem.allocate_response\n");
assert(((_binding->rx_vtbl).allocate_response) != NULL);
((_binding->rx_vtbl).allocate_response)(_binding, ((_binding->rx_union).allocate_response).ret, ((_binding->rx_union).allocate_response).mem_cap);
_binding->rx_msgnum = 0;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
}
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_steal_response__msgnum:
// Switch on current incoming cap
switch ((((b->ump_state).capst).rx_capnum)++) {
case 0:
((_binding->rx_union).steal_response).mem_cap = cap;
if ((_binding->rx_msg_fragment) == 1) {
FL_DEBUG("ump RX mem.steal_response\n");
assert(((_binding->rx_vtbl).steal_response) != NULL);
((_binding->rx_vtbl).steal_response)(_binding, ((_binding->rx_union).steal_response).ret, ((_binding->rx_union).steal_response).mem_cap);
_binding->rx_msgnum = 0;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
}
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_free_monitor_call__msgnum:
// Switch on current incoming cap
switch ((((b->ump_state).capst).rx_capnum)++) {
case 0:
((_binding->rx_union).free_monitor_call).mem_cap = cap;
if ((_binding->rx_msg_fragment) == 1) {
FL_DEBUG("ump RX mem.free_monitor_call\n");
assert(((_binding->rx_vtbl).free_monitor_call) != NULL);
((_binding->rx_vtbl).free_monitor_call)(_binding, ((_binding->rx_union).free_monitor_call).mem_cap, ((_binding->rx_union).free_monitor_call).base, ((_binding->rx_union).free_monitor_call).bits);
_binding->rx_msgnum = 0;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
}
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
default:
assert(!("invalid message number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
}
/*
* Monitor mutex acquire continuation
*/
/*
 * Continuation run when this binding is granted the monitor-binding mutex.
 * Records ownership; if the peer's cap ACK has already arrived, the
 * capability send state machine can start immediately.
 */
static void mem_ump_monitor_mutex_cont(void *arg)
{
    struct mem_ump_binding *b = arg;
    // We must not already believe we hold the mutex.
    assert(!b->ump_state.capst.monitor_mutex_held);
    b->ump_state.capst.monitor_mutex_held = true;
    // If the cap ACK beat the mutex grant, kick off cap sending now.
    if (b->ump_state.capst.rx_cap_ack) {
        mem_ump_cap_send_handler(b);
    }
}
/*
* Message sender functions
*/
/*
 * Queue an allocate_call message for transmission.
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding; otherwise stores the arguments and kicks the sender.
 */
static errval_t mem_allocate_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and arguments for the fragment sender.
    _binding->tx_msgnum = mem_allocate_call__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.allocate_call.bits = bits;
    _binding->tx_union.allocate_call.minbase = minbase;
    _binding->tx_union.allocate_call.maxlimit = maxlimit;
    FL_DEBUG("ump TX mem.allocate_call\n");
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue an allocate_response message (carries one cap: mem_cap).
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding. Cap transfer needs the monitor mutex, which is
 * acquired asynchronously before any cap is handed to the monitor.
 */
static errval_t mem_allocate_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
    struct mem_ump_binding *u = (struct mem_ump_binding *)_binding;
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and arguments for the fragment sender.
    _binding->tx_msgnum = mem_allocate_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.allocate_response.ret = ret;
    _binding->tx_union.allocate_response.mem_cap = mem_cap;
    FL_DEBUG("ump TX mem.allocate_response\n");
    // Reset the cap-transfer state machine for this message.
    u->ump_state.capst.tx_capnum = 0;
    u->ump_state.capst.rx_cap_ack = false;
    u->ump_state.capst.monitor_mutex_held = false;
    // Queue for the monitor-binding mutex; caps go out once we hold it.
    flounder_support_monitor_mutex_enqueue(u->ump_state.chan.monitor_binding, &_binding->event_qnode, (struct event_closure){ .handler = mem_ump_monitor_mutex_cont, .arg = _binding });
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue a steal_call message for transmission.
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding; otherwise stores the arguments and kicks the sender.
 */
static errval_t mem_steal_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and arguments for the fragment sender.
    _binding->tx_msgnum = mem_steal_call__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.steal_call.bits = bits;
    _binding->tx_union.steal_call.minbase = minbase;
    _binding->tx_union.steal_call.maxlimit = maxlimit;
    FL_DEBUG("ump TX mem.steal_call\n");
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue a steal_response message (carries one cap: mem_cap).
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding. Cap transfer needs the monitor mutex, which is
 * acquired asynchronously before any cap is handed to the monitor.
 */
static errval_t mem_steal_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
    struct mem_ump_binding *u = (struct mem_ump_binding *)_binding;
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and arguments for the fragment sender.
    _binding->tx_msgnum = mem_steal_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.steal_response.ret = ret;
    _binding->tx_union.steal_response.mem_cap = mem_cap;
    FL_DEBUG("ump TX mem.steal_response\n");
    // Reset the cap-transfer state machine for this message.
    u->ump_state.capst.tx_capnum = 0;
    u->ump_state.capst.rx_cap_ack = false;
    u->ump_state.capst.monitor_mutex_held = false;
    // Queue for the monitor-binding mutex; caps go out once we hold it.
    flounder_support_monitor_mutex_enqueue(u->ump_state.chan.monitor_binding, &_binding->event_qnode, (struct event_closure){ .handler = mem_ump_monitor_mutex_cont, .arg = _binding });
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue an available_call message (no payload arguments).
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding; otherwise marks the message pending and kicks the sender.
 */
static errval_t mem_available_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation)
{
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash the message number for the fragment sender.
    _binding->tx_msgnum = mem_available_call__msgnum;
    _binding->tx_msg_fragment = 0;
    FL_DEBUG("ump TX mem.available_call\n");
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue an available_response message for transmission.
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding; otherwise stores the arguments and kicks the sender.
 */
static errval_t mem_available_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_genpaddr_t mem_avail, mem_genpaddr_t mem_total)
{
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and arguments for the fragment sender.
    _binding->tx_msgnum = mem_available_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.available_response.mem_avail = mem_avail;
    _binding->tx_union.available_response.mem_total = mem_total;
    FL_DEBUG("ump TX mem.available_response\n");
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue a free_monitor_call message (carries one cap: mem_cap).
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding. Cap transfer needs the monitor mutex, which is
 * acquired asynchronously before any cap is handed to the monitor.
 */
static errval_t mem_free_monitor_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation, struct capref mem_cap, mem_genpaddr_t base, uint8_t bits)
{
    struct mem_ump_binding *u = (struct mem_ump_binding *)_binding;
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and arguments for the fragment sender.
    _binding->tx_msgnum = mem_free_monitor_call__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.free_monitor_call.mem_cap = mem_cap;
    _binding->tx_union.free_monitor_call.base = base;
    _binding->tx_union.free_monitor_call.bits = bits;
    FL_DEBUG("ump TX mem.free_monitor_call\n");
    // Reset the cap-transfer state machine for this message.
    u->ump_state.capst.tx_capnum = 0;
    u->ump_state.capst.rx_cap_ack = false;
    u->ump_state.capst.monitor_mutex_held = false;
    // Queue for the monitor-binding mutex; caps go out once we hold it.
    flounder_support_monitor_mutex_enqueue(u->ump_state.chan.monitor_binding, &_binding->event_qnode, (struct event_closure){ .handler = mem_ump_monitor_mutex_cont, .arg = _binding });
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue a free_monitor_response message for transmission.
 * Returns FLOUNDER_ERR_TX_BUSY if a previous message or its continuation is
 * still outstanding; otherwise stores the argument and kicks the sender.
 */
static errval_t mem_free_monitor_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t err)
{
    // Refuse if another outgoing message is still in flight.
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    // Queue the caller's continuation, if any, to fire once the send completes.
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            // A previous continuation that hasn't fired yet occupies the slot.
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    // Stash message number and argument for the fragment sender.
    _binding->tx_msgnum = mem_free_monitor_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.free_monitor_response.err = err;
    FL_DEBUG("ump TX mem.free_monitor_response\n");
    // Kick the send state machine.
    mem_ump_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
* Send vtable
*/
// Transmit vtable installed on every UMP binding: maps each interface
// message 1:1 onto its generated __ump_send stub above.
static struct mem_tx_vtbl mem_ump_tx_vtbl = {
.allocate_call = mem_allocate_call__ump_send,
.allocate_response = mem_allocate_response__ump_send,
.steal_call = mem_steal_call__ump_send,
.steal_response = mem_steal_response__ump_send,
.available_call = mem_available_call__ump_send,
.available_response = mem_available_response__ump_send,
.free_monitor_call = mem_free_monitor_call__ump_send,
.free_monitor_response = mem_free_monitor_response__ump_send,
};
/*
* Control functions
*/
// True iff no outgoing message is currently in flight on this binding.
static bool mem_ump_can_send(struct mem_binding *b)
{
    return b->tx_msgnum == 0;
}
static errval_t mem_ump_register_send(struct mem_binding *b, struct waitset *ws, struct event_closure _continuation)
{
return(flounder_support_register(ws, &(b->register_chanstate), _continuation, mem_ump_can_send(b)));
}
// Last-resort error handler installed at init time: report the error and
// abort, since the application supplied no handler of its own.
static void mem_ump_default_error_handler(struct mem_binding *b, errval_t err)
{
    DEBUG_ERR(err, "asynchronous error in Flounder-generated mem ump binding (default handler)");
    abort();
}
// Move the binding (and any private monitor binding it owns) to a new
// waitset, re-registering the channel receive event if it was registered.
static errval_t mem_ump_change_waitset(struct mem_binding *_binding, struct waitset *ws)
{
struct mem_ump_binding *b = (void *)(_binding);
errval_t err;
// change waitset on private monitor binding if we have one
if ((((b->ump_state).chan).monitor_binding) != get_monitor_binding()) {
err = flounder_support_change_monitor_waitset(((b->ump_state).chan).monitor_binding, ws);
if (err_is_fail(err)) {
return(err_push(err, FLOUNDER_ERR_CHANGE_MONITOR_WAITSET));
}
}
// change waitset on binding
_binding->waitset = ws;
// re-register for receive (if previously registered)
err = ump_chan_deregister_recv(&((b->ump_state).chan));
if (err_is_fail(err) && (err_no(err) != LIB_ERR_CHAN_NOT_REGISTERED)) {
return(err_push(err, LIB_ERR_CHAN_DEREGISTER_RECV));
}
// note: err_is_ok(err) here means we WERE registered and deregistered
// successfully; LIB_ERR_CHAN_NOT_REGISTERED falls through without
// re-registering, preserving the previous (unregistered) state
if (err_is_ok(err)) {
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
return(err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
}
return(SYS_ERR_OK);
}
// Control operation entry point: the UMP backend supports no control flags,
// so every request trivially succeeds without touching the binding.
static errval_t mem_ump_control(struct mem_binding *_binding, idc_control_t control)
{
    return SYS_ERR_OK;
}
/*
* Function to destroy the binding state
*/
/*
 * Tear down the binding's channel state: both waitset channel states and
 * the underlying UMP channel. Does NOT free the struct itself — the caller
 * owns that memory.
 */
void mem_ump_destroy(struct mem_ump_binding *b)
{
    flounder_support_waitset_chanstate_destroy(&b->b.register_chanstate);
    flounder_support_waitset_chanstate_destroy(&b->b.tx_cont_chanstate);
    ump_chan_destroy(&b->ump_state.chan);
}
/*
* Bind function
*/
// Continuation invoked when the UMP channel bind completes: on success,
// installs cap handlers and registers for receive; on failure, destroys the
// binding state. Either way the user's bind continuation is notified.
static void mem_ump_bind_continuation(void *st, errval_t err, struct ump_chan *chan, struct capref notify_cap)
{
struct mem_binding *_binding = st;
struct mem_ump_binding *b = st;
if (err_is_ok(err)) {
// notify cap ignored
// setup cap handlers
(((b->ump_state).chan).cap_handlers).st = b;
(((b->ump_state).chan).cap_handlers).cap_receive_handler = mem_ump_cap_rx_handler;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
} else {
mem_ump_destroy(b);
}
// always notify the user, passing on the (possibly updated) error
(_binding->bind_cont)(_binding->st, err, _binding);
}
// Initialise a UMP binding over pre-existing shared buffers (no monitor
// bind handshake): sets up common binding state, the UMP channel, and
// registers for receive notification on the given waitset.
// Returns the channel-init or register error; the caller owns b's memory.
errval_t mem_ump_init(struct mem_ump_binding *b, struct waitset *waitset, volatile void *inbuf, size_t inbufsize, volatile void *outbuf, size_t outbufsize)
{
errval_t err;
struct mem_binding *_binding = &(b->b);
// common binding state: vtables, chanstates, counters all reset
(b->b).st = NULL;
(b->b).waitset = waitset;
event_mutex_init(&((b->b).mutex), waitset);
(b->b).can_send = mem_ump_can_send;
(b->b).register_send = mem_ump_register_send;
(b->b).error_handler = mem_ump_default_error_handler;
(b->b).tx_vtbl = mem_ump_tx_vtbl;
memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
(b->b).tx_msgnum = 0;
(b->b).rx_msgnum = 0;
(b->b).tx_msg_fragment = 0;
(b->b).rx_msg_fragment = 0;
(b->b).tx_str_pos = 0;
(b->b).rx_str_pos = 0;
(b->b).tx_str_len = 0;
(b->b).rx_str_len = 0;
(b->b).bind_cont = NULL;
flounder_stub_ump_state_init(&(b->ump_state), b);
err = ump_chan_init(&((b->ump_state).chan), inbuf, inbufsize, outbuf, outbufsize);
if (err_is_fail(err)) {
mem_ump_destroy(b);
return(err_push(err, LIB_ERR_UMP_CHAN_INIT));
}
(b->b).change_waitset = mem_ump_change_waitset;
(b->b).control = mem_ump_control;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
return(err);
}
// Continuation invoked once a private monitor binding has been created
// (requested when binding with IDC_BIND_FLAG_RPC_CAP_TRANSFER): records the
// new monitor binding and starts the UMP channel bind on it. On any failure
// the user's bind continuation is notified and the binding destroyed.
static void mem_ump_new_monitor_binding_continuation(void *st, errval_t err, struct monitor_binding *monitor_binding)
{
struct mem_binding *_binding = st;
struct mem_ump_binding *b = st;
if (err_is_fail(err)) {
err = err_push(err, LIB_ERR_MONITOR_CLIENT_BIND);
goto out;
}
((b->ump_state).chan).monitor_binding = monitor_binding;
// start the bind on the new monitor binding
// (bind parameters were stashed on b by mem_ump_bind)
err = ump_chan_bind(&((b->ump_state).chan), (struct ump_bind_continuation){ .handler = mem_ump_bind_continuation, .st = b }, &(_binding->event_qnode), b->iref, monitor_binding, b->inchanlen, b->outchanlen, NULL_CAP);
out:
if (err_is_fail(err)) {
(_binding->bind_cont)(_binding->st, err, _binding);
mem_ump_destroy(b);
}
}
// Client-side bind: initialise common binding state, then either request a
// private monitor binding (needed for cap transfer via RPC) or start the
// UMP channel bind on the default monitor binding. _continuation fires
// asynchronously with the bind result; on synchronous failure the binding
// state is destroyed (caller still owns b's memory).
errval_t mem_ump_bind(struct mem_ump_binding *b, iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags, size_t inchanlen, size_t outchanlen)
{
errval_t err;
// common binding state: vtables, chanstates, counters all reset
(b->b).st = NULL;
(b->b).waitset = waitset;
event_mutex_init(&((b->b).mutex), waitset);
(b->b).can_send = mem_ump_can_send;
(b->b).register_send = mem_ump_register_send;
(b->b).error_handler = mem_ump_default_error_handler;
(b->b).tx_vtbl = mem_ump_tx_vtbl;
memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
(b->b).tx_msgnum = 0;
(b->b).rx_msgnum = 0;
(b->b).tx_msg_fragment = 0;
(b->b).rx_msg_fragment = 0;
(b->b).tx_str_pos = 0;
(b->b).rx_str_pos = 0;
(b->b).tx_str_len = 0;
(b->b).rx_str_len = 0;
(b->b).bind_cont = NULL;
flounder_stub_ump_state_init(&(b->ump_state), b);
(b->b).change_waitset = mem_ump_change_waitset;
(b->b).control = mem_ump_control;
(b->b).st = st;
(b->b).bind_cont = _continuation;
// stash bind parameters for mem_ump_new_monitor_binding_continuation
b->iref = iref;
b->inchanlen = inchanlen;
b->outchanlen = outchanlen;
// do we need a new monitor binding?
if (flags & IDC_BIND_FLAG_RPC_CAP_TRANSFER) {
err = monitor_client_new_binding(mem_ump_new_monitor_binding_continuation, b, waitset, DEFAULT_LMP_BUF_WORDS);
} else {
err = ump_chan_bind(&((b->ump_state).chan), (struct ump_bind_continuation){ .handler = mem_ump_bind_continuation, .st = b }, &((b->b).event_qnode), iref, get_monitor_binding(), inchanlen, outchanlen, NULL_CAP);
}
if (err_is_fail(err)) {
mem_ump_destroy(b);
}
return(err);
}
/*
* Connect callback for export
*/
/*
 * Connect callback for export: called by the monitor when a client attempts
 * a UMP connection to this service. Allocates and initialises a fresh
 * binding, consults the user's connect callback, and on acceptance sets up
 * the channel, cap handlers and receive registration, then sends the bind
 * reply via the monitor.
 *
 * Ownership: the binding struct is heap-allocated here. On refusal it is
 * destroyed AND freed before returning (previously it leaked).
 */
errval_t mem_ump_connect_handler(void *st, struct monitor_binding *mb, uintptr_t mon_id, struct capref frame, size_t inchanlen, size_t outchanlen, struct capref notify_cap)
{
struct mem_export *e = st;
errval_t err;
// allocate storage for binding
struct mem_ump_binding *b = malloc(sizeof(struct mem_ump_binding ));
if (b == NULL) {
return(LIB_ERR_MALLOC_FAIL);
}
struct mem_binding *_binding = &(b->b);
// common binding state: vtables, chanstates, counters all reset
(b->b).st = NULL;
(b->b).waitset = (e->waitset);
event_mutex_init(&((b->b).mutex), e->waitset);
(b->b).can_send = mem_ump_can_send;
(b->b).register_send = mem_ump_register_send;
(b->b).error_handler = mem_ump_default_error_handler;
(b->b).tx_vtbl = mem_ump_tx_vtbl;
memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
(b->b).tx_msgnum = 0;
(b->b).rx_msgnum = 0;
(b->b).tx_msg_fragment = 0;
(b->b).rx_msg_fragment = 0;
(b->b).tx_str_pos = 0;
(b->b).rx_str_pos = 0;
(b->b).tx_str_len = 0;
(b->b).rx_str_len = 0;
(b->b).bind_cont = NULL;
flounder_stub_ump_state_init(&(b->ump_state), b);
(b->b).change_waitset = mem_ump_change_waitset;
(b->b).control = mem_ump_control;
// run user's connect handler
err = ((e->connect_cb)(e->st, _binding));
if (err_is_fail(err)) {
// connection refused: tear down channel state and release the binding
mem_ump_destroy(b);
// FIX: mem_ump_destroy() does not free the struct itself; without this
// free() every refused connection leaked the malloc'd binding.
free(b);
return(err);
}
// accept the connection and setup the channel
err = ump_chan_accept(&((b->ump_state).chan), mon_id, frame, inchanlen, outchanlen);
if (err_is_fail(err)) {
err = err_push(err, LIB_ERR_UMP_CHAN_ACCEPT);
(_binding->error_handler)(_binding, err);
// NOTE(review): the binding also leaks on this path, but the user's
// connect callback and error handler have already seen it and may retain
// a reference, so it is not freed here — TODO confirm intended ownership
return(err);
}
// notify cap ignored
// setup cap handlers
(((b->ump_state).chan).cap_handlers).st = b;
(((b->ump_state).chan).cap_handlers).cap_receive_handler = mem_ump_cap_rx_handler;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding });
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
}
// send back bind reply
ump_chan_send_bind_reply(mb, &((b->ump_state).chan), SYS_ERR_OK, mon_id, NULL_CAP);
return(SYS_ERR_OK);
}
#endif // CONFIG_FLOUNDER_BACKEND_UMP
/*
* Copyright (c) 2010, ETH Zurich.
* All rights reserved.
*
* INTERFACE NAME: mem
 * INTERFACE FILE: ../if/mem.if
* INTERFACE DESCRIPTION: Memory allocation RPC interface
*
* This file is distributed under the terms in the attached LICENSE
* file. If you do not find this file, copies can be found by
* writing to:
* ETH Zurich D-INFK, Universitaetstr.6, CH-8092 Zurich.
* Attn: Systems Group.
*
* THIS FILE IS AUTOMATICALLY GENERATED BY FLOUNDER: DO NOT EDIT!
*/
#ifdef CONFIG_FLOUNDER_BACKEND_MULTIHOP
/*
* Generated Stub for Multihop on x86_64
*/
#include <string.h>
#include <barrelfish/barrelfish.h>
#include <flounder/flounder_support.h>
#include <if/mem_defs.h>
/*
* Capability sender function
*/
/*
 * Capability send handler (multihop backend).
 *
 * Invoked after the message payload has been sent, once per capability,
 * to transfer the caps belonging to the current outgoing message.  Outer
 * switch selects the message; inner switch walks tx_capnum.  After the
 * last cap, either the TX completes (msgnum reset, continuations fired)
 * or completion is deferred via trigger_chan if the flow-control window
 * is full.
 */
static void mem_multihop_cap_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
// Switch on current outgoing message
switch (_binding->tx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current outgoing cap
switch ((mb->capst).tx_capnum) {
case 0:
err = multihop_send_capability(&(mb->chan), MKCONT(mem_multihop_cap_send_handler, _binding), &(mb->capst), ((_binding->tx_union).allocate_response).mem_cap);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
// channel busy: re-register this handler and retry later
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_multihop_cap_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
// report failure and abort the current TX
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
break;
case 1:
// all caps sent: finish TX now, or defer if the window is full
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
break;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
break;
}
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_steal_response__msgnum:
// Switch on current outgoing cap
switch ((mb->capst).tx_capnum) {
case 0:
err = multihop_send_capability(&(mb->chan), MKCONT(mem_multihop_cap_send_handler, _binding), &(mb->capst), ((_binding->tx_union).steal_response).mem_cap);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_multihop_cap_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
break;
case 1:
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
break;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
break;
}
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_free_monitor_call__msgnum:
// Switch on current outgoing cap
switch ((mb->capst).tx_capnum) {
case 0:
err = multihop_send_capability(&(mb->chan), MKCONT(mem_multihop_cap_send_handler, _binding), &(mb->capst), ((_binding->tx_union).free_monitor_call).mem_cap);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_multihop_cap_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
break;
case 1:
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
break;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
break;
}
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
default:
assert(!("invalid message number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
}
/*
* Send handler functions
*/
static void mem_allocate_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 24;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode,ArgFieldFragment uint8 [NamedField "bits"] 0],[ArgFieldFragment uint64 [NamedField "minbase"] 0],[ArgFieldFragment uint64 [NamedField "maxlimit"] 0]]
msg[0] = (mem_allocate_call__msgnum | (((uint64_t )(((_binding->tx_union).allocate_call).bits)) << 16));
msg[1] = (((_binding->tx_union).allocate_call).minbase);
msg[2] = (((_binding->tx_union).allocate_call).maxlimit);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_allocate_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_allocate_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
// all fragments are sent
free(mb->message);
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
return;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
return;
}
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_allocate_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "ret"] 0]]
msg[0] = mem_allocate_response__msgnum;
msg[1] = (((_binding->tx_union).allocate_response).ret);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_allocate_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_allocate_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
free(mb->message);
// send caps
mem_multihop_cap_send_handler(mb);
return;
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_steal_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 24;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode,ArgFieldFragment uint8 [NamedField "bits"] 0],[ArgFieldFragment uint64 [NamedField "minbase"] 0],[ArgFieldFragment uint64 [NamedField "maxlimit"] 0]]
msg[0] = (mem_steal_call__msgnum | (((uint64_t )(((_binding->tx_union).steal_call).bits)) << 16));
msg[1] = (((_binding->tx_union).steal_call).minbase);
msg[2] = (((_binding->tx_union).steal_call).maxlimit);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_steal_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_steal_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
// all fragments are sent
free(mb->message);
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
return;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
return;
}
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_steal_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "ret"] 0]]
msg[0] = mem_steal_response__msgnum;
msg[1] = (((_binding->tx_union).steal_response).ret);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_steal_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_steal_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
free(mb->message);
// send caps
mem_multihop_cap_send_handler(mb);
return;
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_available_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 8;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode]]
msg[0] = mem_available_call__msgnum;
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_available_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_available_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
// all fragments are sent
free(mb->message);
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
return;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
return;
}
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_available_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 24;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "mem_avail"] 0],[ArgFieldFragment uint64 [NamedField "mem_total"] 0]]
msg[0] = mem_available_response__msgnum;
msg[1] = (((_binding->tx_union).available_response).mem_avail);
msg[2] = (((_binding->tx_union).available_response).mem_total);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_available_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_available_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
// all fragments are sent
free(mb->message);
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
return;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
return;
}
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_free_monitor_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode,ArgFieldFragment uint8 [NamedField "bits"] 0],[ArgFieldFragment uint64 [NamedField "base"] 0]]
msg[0] = (mem_free_monitor_call__msgnum | (((uint64_t )(((_binding->tx_union).free_monitor_call).bits)) << 16));
msg[1] = (((_binding->tx_union).free_monitor_call).base);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_free_monitor_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_free_monitor_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
free(mb->message);
// send caps
mem_multihop_cap_send_handler(mb);
return;
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
static void mem_free_monitor_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "err"] 0]]
msg[0] = mem_free_monitor_response__msgnum;
msg[1] = (((_binding->tx_union).free_monitor_response).err);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_free_monitor_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_free_monitor_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
if (err_is_fail(err)) {
break;
} else {
return;
}
case 1:
// all fragments are sent
free(mb->message);
if (multihop_chan_is_window_full(&(mb->chan))) {
mb->trigger_chan = true;
return;
} else {
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
return;
}
default:
assert(!("invalid fragment"));
err = FLOUNDER_ERR_INVALID_STATE;
}
// Report error to user
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
}
/*
* Cap receive handlers
*/
/*
 * Capability receive handler (multihop backend).
 *
 * Invoked once per incoming capability for the message currently being
 * received.  Stores the cap into the RX union; once the last cap of a
 * message arrives, the user's RX vtable function is delivered and
 * rx_msgnum is reset.  If a deferred TX completion is pending
 * (trigger_chan), it is fired first.
 */
void mem_multihop_caps_rx_handler(void *arg, errval_t success, struct capref cap, uint32_t capid)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
assert(capid == ((mb->capst).rx_capnum));
// Check if there's an associated error
// FIXME: how should we report this to the user? at present we just deliver a NULL capref
if (err_is_fail(success)) {
DEBUG_ERR(success, "could not send cap over multihop channel");
}
// Switch on current incoming message
switch (_binding->rx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current incoming cap
switch (((mb->capst).rx_capnum)++) {
case 0:
((_binding->rx_union).allocate_response).mem_cap = cap;
// fire any deferred TX completion before delivering the message
if (mb->trigger_chan) {
mb->trigger_chan = false;
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
FL_DEBUG("multihop RX mem.allocate_response\n");
assert(((_binding->rx_vtbl).allocate_response) != NULL);
((_binding->rx_vtbl).allocate_response)(_binding, ((_binding->rx_union).allocate_response).ret, ((_binding->rx_union).allocate_response).mem_cap);
_binding->rx_msgnum = 0;
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_steal_response__msgnum:
// Switch on current incoming cap
switch (((mb->capst).rx_capnum)++) {
case 0:
((_binding->rx_union).steal_response).mem_cap = cap;
if (mb->trigger_chan) {
mb->trigger_chan = false;
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
FL_DEBUG("multihop RX mem.steal_response\n");
assert(((_binding->rx_vtbl).steal_response) != NULL);
((_binding->rx_vtbl).steal_response)(_binding, ((_binding->rx_union).steal_response).ret, ((_binding->rx_union).steal_response).mem_cap);
_binding->rx_msgnum = 0;
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
case mem_free_monitor_call__msgnum:
// Switch on current incoming cap
switch (((mb->capst).rx_capnum)++) {
case 0:
((_binding->rx_union).free_monitor_call).mem_cap = cap;
if (mb->trigger_chan) {
mb->trigger_chan = false;
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
FL_DEBUG("multihop RX mem.free_monitor_call\n");
assert(((_binding->rx_vtbl).free_monitor_call) != NULL);
((_binding->rx_vtbl).free_monitor_call)(_binding, ((_binding->rx_union).free_monitor_call).mem_cap, ((_binding->rx_union).free_monitor_call).base, ((_binding->rx_union).free_monitor_call).bits);
_binding->rx_msgnum = 0;
break;
default:
assert(!("invalid cap number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
break;
default:
assert(!("invalid message number"));
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
}
}
/*
* Message sender functions
*/
/*
 * Queue mem.allocate_call for transmission over the multihop backend.
 * Returns FLOUNDER_ERR_TX_BUSY if a message (or a previous continuation)
 * is still outstanding; otherwise stores the arguments and starts sending.
 */
static errval_t mem_allocate_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and arguments in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_allocate_call__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.allocate_call.bits = bits;
    _binding->tx_union.allocate_call.minbase = minbase;
    _binding->tx_union.allocate_call.maxlimit = maxlimit;
    FL_DEBUG("multihop TX mem.allocate_call\n");
    mem_allocate_call__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.allocate_response for transmission over the multihop backend.
 * Returns FLOUNDER_ERR_TX_BUSY while a prior message or continuation is
 * outstanding; otherwise stores the arguments and starts sending.
 */
static errval_t mem_allocate_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and arguments in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_allocate_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.allocate_response.ret = ret;
    _binding->tx_union.allocate_response.mem_cap = mem_cap;
    FL_DEBUG("multihop TX mem.allocate_response\n");
    mem_allocate_response__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.steal_call for transmission over the multihop backend.
 * Returns FLOUNDER_ERR_TX_BUSY while a prior message or continuation is
 * outstanding; otherwise stores the arguments and starts sending.
 */
static errval_t mem_steal_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and arguments in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_steal_call__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.steal_call.bits = bits;
    _binding->tx_union.steal_call.minbase = minbase;
    _binding->tx_union.steal_call.maxlimit = maxlimit;
    FL_DEBUG("multihop TX mem.steal_call\n");
    mem_steal_call__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.steal_response for transmission over the multihop backend.
 * Returns FLOUNDER_ERR_TX_BUSY while a prior message or continuation is
 * outstanding; otherwise stores the arguments and starts sending.
 */
static errval_t mem_steal_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and arguments in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_steal_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.steal_response.ret = ret;
    _binding->tx_union.steal_response.mem_cap = mem_cap;
    FL_DEBUG("multihop TX mem.steal_response\n");
    mem_steal_response__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.available_call (no payload arguments) for transmission over
 * the multihop backend.  Returns FLOUNDER_ERR_TX_BUSY while a prior
 * message or continuation is outstanding.
 */
static errval_t mem_available_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Record the message number, then kick off TX. */
    _binding->tx_msgnum = mem_available_call__msgnum;
    _binding->tx_msg_fragment = 0;
    FL_DEBUG("multihop TX mem.available_call\n");
    mem_available_call__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.available_response for transmission over the multihop backend.
 * Returns FLOUNDER_ERR_TX_BUSY while a prior message or continuation is
 * outstanding; otherwise stores the arguments and starts sending.
 */
static errval_t mem_available_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_genpaddr_t mem_avail, mem_genpaddr_t mem_total)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and arguments in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_available_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.available_response.mem_avail = mem_avail;
    _binding->tx_union.available_response.mem_total = mem_total;
    FL_DEBUG("multihop TX mem.available_response\n");
    mem_available_response__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.free_monitor_call for transmission over the multihop backend.
 * Returns FLOUNDER_ERR_TX_BUSY while a prior message or continuation is
 * outstanding; otherwise stores the arguments and starts sending.
 */
static errval_t mem_free_monitor_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, struct capref mem_cap, mem_genpaddr_t base, uint8_t bits)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and arguments in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_free_monitor_call__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.free_monitor_call.mem_cap = mem_cap;
    _binding->tx_union.free_monitor_call.base = base;
    _binding->tx_union.free_monitor_call.bits = bits;
    FL_DEBUG("multihop TX mem.free_monitor_call\n");
    mem_free_monitor_call__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Queue mem.free_monitor_response for transmission over the multihop
 * backend.  Returns FLOUNDER_ERR_TX_BUSY while a prior message or
 * continuation is outstanding; otherwise stores the argument and sends.
 */
static errval_t mem_free_monitor_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t err)
{
    /* Refuse if another message is already in flight. */
    if (_binding->tx_msgnum != 0) {
        return FLOUNDER_ERR_TX_BUSY;
    }
    /* Register the caller's send continuation, if any. */
    if (_continuation.handler != NULL) {
        errval_t reg_err = flounder_support_register(_binding->waitset, &_binding->tx_cont_chanstate, _continuation, false);
        if (err_is_fail(reg_err)) {
            /* A still-pending previous continuation shows up as "already registered". */
            if (err_no(reg_err) == LIB_ERR_CHAN_ALREADY_REGISTERED) {
                return FLOUNDER_ERR_TX_BUSY;
            }
            assert(!("shouldn't happen"));
            return reg_err;
        }
    }
    /* Stash message number and argument in the binding, then kick off TX. */
    _binding->tx_msgnum = mem_free_monitor_response__msgnum;
    _binding->tx_msg_fragment = 0;
    _binding->tx_union.free_monitor_response.err = err;
    FL_DEBUG("multihop TX mem.free_monitor_response\n");
    mem_free_monitor_response__multihop_send_handler(_binding);
    return SYS_ERR_OK;
}
/*
 * Send vtable: the per-message multihop send entry points.  Installed into
 * the binding's tx_vtbl by mem_multihop_init().
 */
static struct mem_tx_vtbl mem_multihop_tx_vtbl = {
    .allocate_call = mem_allocate_call__multihop_send,
    .allocate_response = mem_allocate_response__multihop_send,
    .steal_call = mem_steal_call__multihop_send,
    .steal_response = mem_steal_response__multihop_send,
    .available_call = mem_available_call__multihop_send,
    .available_response = mem_available_response__multihop_send,
    .free_monitor_call = mem_free_monitor_call__multihop_send,
    .free_monitor_response = mem_free_monitor_response__multihop_send,
};
/*
 * Receive handler.
 *
 * Invoked by the multihop channel layer with either a complete marshalled
 * message (heap-allocated; this function frees it) or a zero-length dummy
 * message used purely to re-trigger flow control.  Decodes the message
 * number from the first bytes, copies the fixed-size arguments into
 * rx_union, and then either delivers the message through the user's rx_vtbl
 * or only advances rx_msg_fragment for messages whose remaining state
 * (capabilities) arrives separately -- presumably completed by
 * mem_multihop_caps_rx_handler; verify against the cap-receive path.
 *
 * NOTE(review): the casts to (uint64_t *) assume `message` is suitably
 * aligned for 64-bit reads -- confirm the allocator's guarantee.
 */
void mem_multihop_rx_handler(void *arg, uint8_t *message, size_t message_len)
{
    // Get the binding state from our argument pointer
    struct mem_binding *_binding = arg;
    struct mem_multihop_binding *mb = arg;
    uint8_t *msg;  // cursor past the fixed-size fragment; unused here since no message carries strings/buffers

    // is this a dummy message?
    if (message_len == 0) {
        if (mb->trigger_chan) {
            // a send slot opened up: clear TX state and wake waiters
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        return;
    }
    // is this the start of a new message?
    if ((_binding->rx_msgnum) == 0) {
        // unmarshall message number from first word, set fragment to 0
        // NOTE(review): message[0] is a single byte, so the 0xffff mask only
        // keeps 8 bits -- confirm msgnums always fit in one byte here.
        _binding->rx_msgnum = ((message[0]) & 0xffff);
        _binding->rx_msg_fragment = 0;
        (mb->capst).rx_capnum = 0;
    } else {
        assert(!"should not happen");
    }
    // switch on message number
    switch (_binding->rx_msgnum) {
    case mem_allocate_call__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).allocate_call).bits = (((((uint64_t *)(message))[0]) >> 16) & 0xff);
        ((_binding->rx_union).allocate_call).minbase = (((uint64_t *)(message))[1]);
        ((_binding->rx_union).allocate_call).maxlimit = (((uint64_t *)(message))[2]);
        msg = (message + 24);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        FL_DEBUG("multihop RX mem.allocate_call\n");
        // deliver the complete message to the user handler
        assert(((_binding->rx_vtbl).allocate_call) != NULL);
        ((_binding->rx_vtbl).allocate_call)(_binding, ((_binding->rx_union).allocate_call).bits, ((_binding->rx_union).allocate_call).minbase, ((_binding->rx_union).allocate_call).maxlimit);
        _binding->rx_msgnum = 0;
        break;
    case mem_allocate_response__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).allocate_response).ret = (((uint64_t *)(message))[1]);
        msg = (message + 16);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        // message not delivered yet: a further fragment (presumably the
        // capability, handled by the caps RX handler) completes it
        (_binding->rx_msg_fragment)++;
        break;
    case mem_steal_call__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).steal_call).bits = (((((uint64_t *)(message))[0]) >> 16) & 0xff);
        ((_binding->rx_union).steal_call).minbase = (((uint64_t *)(message))[1]);
        ((_binding->rx_union).steal_call).maxlimit = (((uint64_t *)(message))[2]);
        msg = (message + 24);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        FL_DEBUG("multihop RX mem.steal_call\n");
        assert(((_binding->rx_vtbl).steal_call) != NULL);
        ((_binding->rx_vtbl).steal_call)(_binding, ((_binding->rx_union).steal_call).bits, ((_binding->rx_union).steal_call).minbase, ((_binding->rx_union).steal_call).maxlimit);
        _binding->rx_msgnum = 0;
        break;
    case mem_steal_response__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).steal_response).ret = (((uint64_t *)(message))[1]);
        msg = (message + 16);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        // awaiting a further fragment (cap) before delivery
        (_binding->rx_msg_fragment)++;
        break;
    case mem_available_call__msgnum:
        // store fixed size fragments (none beyond the msgnum word)
        msg = (message + 8);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        FL_DEBUG("multihop RX mem.available_call\n");
        assert(((_binding->rx_vtbl).available_call) != NULL);
        ((_binding->rx_vtbl).available_call)(_binding);
        _binding->rx_msgnum = 0;
        break;
    case mem_available_response__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).available_response).mem_avail = (((uint64_t *)(message))[1]);
        ((_binding->rx_union).available_response).mem_total = (((uint64_t *)(message))[2]);
        msg = (message + 24);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        FL_DEBUG("multihop RX mem.available_response\n");
        assert(((_binding->rx_vtbl).available_response) != NULL);
        ((_binding->rx_vtbl).available_response)(_binding, ((_binding->rx_union).available_response).mem_avail, ((_binding->rx_union).available_response).mem_total);
        _binding->rx_msgnum = 0;
        break;
    case mem_free_monitor_call__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).free_monitor_call).bits = (((((uint64_t *)(message))[0]) >> 16) & 0xff);
        ((_binding->rx_union).free_monitor_call).base = (((uint64_t *)(message))[1]);
        msg = (message + 16);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        // awaiting a further fragment (mem_cap) before delivery
        (_binding->rx_msg_fragment)++;
        break;
    case mem_free_monitor_response__msgnum:
        // store fixed size fragments
        ((_binding->rx_union).free_monitor_response).err = (((uint64_t *)(message))[1]);
        msg = (message + 16);
        // receive strings
        // receive buffers
        free(message);
        if (mb->trigger_chan) {
            mb->trigger_chan = false;
            _binding->tx_msgnum = 0;
            flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
            flounder_support_trigger_chan(&(_binding->register_chanstate));
        }
        FL_DEBUG("multihop RX mem.free_monitor_response\n");
        assert(((_binding->rx_vtbl).free_monitor_response) != NULL);
        ((_binding->rx_vtbl).free_monitor_response)(_binding, ((_binding->rx_union).free_monitor_response).err);
        _binding->rx_msgnum = 0;
        break;
    default:
        // unknown message number: report through the binding's error handler
        (_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_MSGNUM);
        return;
    }
}
/*
* Control functions
*/
/* A message can be sent iff nothing is in flight and the channel's
 * flow-control window still has room. */
static bool mem_multihop_can_send(struct mem_binding *b)
{
    struct mem_multihop_binding *mhb = (struct mem_multihop_binding *)b;
    return (b->tx_msgnum == 0) && !multihop_chan_is_window_full(&mhb->chan);
}
/* Register a continuation to fire when sending becomes possible; fires
 * immediately if the binding can already send. */
static errval_t mem_multihop_register_send(struct mem_binding *b, struct waitset *ws, struct event_closure _continuation)
{
    bool ready = mem_multihop_can_send(b);
    return flounder_support_register(ws, &b->register_chanstate, _continuation, ready);
}
/* Installed until the user supplies an error handler: log and terminate,
 * since there is no way to recover an unknown asynchronous failure. */
static void mem_multihop_default_error_handler(struct mem_binding *b, errval_t err)
{
    (void)b;
    DEBUG_ERR(err, "asynchronous error in Flounder-generated mem multihop binding (default handler)");
    abort();
}
/* Move both the binding and its underlying multihop channel to a new waitset. */
static errval_t mem_multihop_change_waitset(struct mem_binding *_binding, struct waitset *ws)
{
    struct mem_multihop_binding *mhb = (struct mem_multihop_binding *)_binding;

    /* binding-level waitset */
    _binding->waitset = ws;

    /* channel-level waitset */
    return multihop_chan_change_waitset(&mhb->chan, ws);
}
/* The multihop backend implements no IDC control operations; accept and
 * ignore every request. */
static errval_t mem_multihop_control(struct mem_binding *_binding, idc_control_t control)
{
    (void)_binding;
    (void)control;
    return SYS_ERR_OK;
}
/*
* Functions to initialise/destroy the binding state
*/
void mem_multihop_init(struct mem_multihop_binding *mb, struct waitset *waitset)
{
(mb->b).st = NULL;
(mb->b).waitset = waitset;
event_mutex_init(&((mb->b).mutex), waitset);
(mb->b).can_send = mem_multihop_can_send;
(mb->b).register_send = mem_multihop_register_send;
(mb->b).error_handler = mem_multihop_default_error_handler;
(mb->b).tx_vtbl = mem_multihop_tx_vtbl;
memset(&((mb->b).rx_vtbl), 0, sizeof((mb->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((mb->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((mb->b).tx_cont_chanstate));
(mb->b).tx_msgnum = 0;
(mb->b).rx_msgnum = 0;
(mb->b).tx_msg_fragment = 0;
(mb->b).rx_msg_fragment = 0;
(mb->b).tx_str_pos = 0;
(mb->b).rx_str_pos = 0;
(mb->b).tx_str_len = 0;
(mb->b).rx_str_len = 0;
(mb->b).bind_cont = NULL;
(mb->b).change_waitset = mem_multihop_change_waitset;
(mb->b).control = mem_multihop_control;
mb->trigger_chan = false;
}
/*
 * Tear down a multihop binding.
 *
 * Releases the flow-control channel states, then deliberately trips an
 * assertion: full destruction is not yet implemented ("NYI") in this
 * generated code, so the call aborts in debug builds.
 */
void mem_multihop_destroy(struct mem_multihop_binding *mb)
{
    flounder_support_waitset_chanstate_destroy(&((mb->b).register_chanstate));
    flounder_support_waitset_chanstate_destroy(&((mb->b).tx_cont_chanstate));
    assert(! "NYI!");
}
/*
* Bind function
*/
/* Continuation invoked when the underlying multihop channel bind completes:
 * on success, hook up the message and capability receive paths; on failure,
 * tear the binding down.  Either way, forward the result to the user's
 * bind continuation. */
static void mem_multihop_bind_continuation(void *st, errval_t err, struct multihop_chan *chan)
{
    struct mem_multihop_binding *mb = st;

    if (err_is_ok(err)) {
        multihop_chan_set_receive_handler(&mb->chan,
            (struct multihop_receive_handler){ .handler = mem_multihop_rx_handler, .arg = st });
        multihop_chan_set_caps_receive_handlers(&mb->chan,
            (struct monitor_cap_handlers){ .st = st, .cap_receive_handler = mem_multihop_caps_rx_handler });
    } else {
        mem_multihop_destroy(mb);
    }

    mb->b.bind_cont(mb->b.st, err, &mb->b);
}
/* Bind to a mem service over a multihop channel.  The supplied continuation
 * is invoked (via mem_multihop_bind_continuation) once the channel is up. */
errval_t mem_multihop_bind(struct mem_multihop_binding *mb, iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags)
{
    mem_multihop_init(mb, waitset);
    mb->b.st = st;
    mb->b.bind_cont = _continuation;

    errval_t err = multihop_chan_bind(&mb->chan,
        (struct multihop_bind_continuation){ .handler = mem_multihop_bind_continuation, .st = mb },
        iref, waitset);
    if (err_is_fail(err)) {
        mem_multihop_destroy(mb);
    }
    return err;
}
/*
* Connect callback for export
*/
/*
 * Connect callback for export: allocate and initialise a binding for an
 * incoming multihop connection, run the user's connect handler, and on
 * success install receive handlers and send the bind reply.
 *
 * Returns LIB_ERR_MALLOC_FAIL on allocation failure, or the error from the
 * user's connect callback.
 */
errval_t mem_multihop_connect_handler(void *st, multihop_vci_t vci)
{
    struct mem_export *e = st;
    errval_t err;

    // allocate storage for binding
    struct mem_multihop_binding *mb = malloc(sizeof(struct mem_multihop_binding));
    if (mb == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // initialize binding
    struct mem_binding *_binding = &mb->b;
    mem_multihop_init(mb, e->waitset);
    mb->chan.vci = vci;

    // run user's connect handler
    err = e->connect_cb(e->st, _binding);
    if (err_is_fail(err)) {
        /* Fix: the binding was previously leaked on this path.  Release the
         * channel states set up by mem_multihop_init() and free the memory
         * (mem_multihop_destroy() is not usable here: it asserts NYI). */
        flounder_support_waitset_chanstate_destroy(&_binding->register_chanstate);
        flounder_support_waitset_chanstate_destroy(&_binding->tx_cont_chanstate);
        free(mb);
        return err;
    }

    // set receive handlers
    multihop_chan_set_receive_handler(&mb->chan,
        (struct multihop_receive_handler){ .handler = mem_multihop_rx_handler, .arg = mb });
    multihop_chan_set_caps_receive_handlers(&mb->chan,
        (struct monitor_cap_handlers){ .st = mb, .cap_receive_handler = mem_multihop_caps_rx_handler });

    // send back bind reply
    multihop_chan_send_bind_reply(&mb->chan, SYS_ERR_OK, mb->chan.vci, mb->b.waitset);
    return err;
}
#endif // CONFIG_FLOUNDER_BACKEND_MULTIHOP
| daleooo/barrelfish | build/x86_64/lib/barrelfish/_for_lib_barrelfish/mem_flounder_bindings.c | C | mit | 156,625 |
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#include <ibcrypt/chacha.h>
#include <ibcrypt/rand.h>
#include <ibcrypt/sha256.h>
#include <ibcrypt/zfree.h>

#include <libibur/util.h>
#include <libibur/endian.h>

#include "datafile.h"
#include "../util/log.h"
/*
 * Serialise the linked list `data` (walked via f->next_off, sized and
 * written via f->datalen/f->datawrite) to an encrypted, authenticated file:
 *
 *   prefix:  num(8) | len(8) | salt(0x20) | caller prefix (f->pref_len) | HMAC(0x20)
 *   payload: chacha-encrypted concatenation of the records
 *   trailer: HMAC over prefix + encrypted payload
 *
 * The encryption key is SHA256(symm_key || salt), so the fixed chacha nonce
 * is fresh per file.  Returns 0 on success, -1 on any failure.
 */
int write_datafile(char *path, void *arg, void *data, struct format_desc *f) {
	int ret = -1;

	uint8_t *payload = NULL;
	uint64_t payload_len = 0;
	uint64_t payload_num = 0;

	uint8_t *prefix = NULL;
	uint64_t pref_len = 0;

	uint8_t symm_key[0x20];
	uint8_t hmac_key[0x20];
	uint8_t enc_key[0x20];

	FILE *ff = fopen(path, "wb");
	if(ff == NULL) {
		ERR("failed to open file for writing: %s", path);
		goto err;
	}

	pref_len = 0x50 + f->pref_len;
	prefix = malloc(pref_len);
	if(prefix == NULL) {
		ERR("failed to allocate memory");
		goto err;
	}

	/* walk the list once to size the payload */
	void *cur = data;
	while(cur) {
		payload_len += f->datalen(cur);
		payload_num++;
		cur = *((void **) ((char*)cur + f->next_off));
	}

	encbe64(payload_num, &prefix[0]);
	encbe64(payload_len, &prefix[8]);

	if(cs_rand(&prefix[0x10], 0x20) != 0) {
		ERR("failed to generate random numbers");
		goto err;
	}
	if(f->p_fill(arg, &prefix[0x30]) != 0) {
		goto err;
	}
	if(f->s_key(arg, &prefix[0x30], symm_key) != 0) {
		goto err;
	}
	if(f->h_key(arg, &prefix[0x30], hmac_key) != 0) {
		goto err;
	}

	/* authenticate the prefix in place (last 0x20 bytes hold its MAC) */
	hmac_sha256(hmac_key, 0x20, prefix, pref_len - 0x20, &prefix[pref_len - 0x20]);

	/* derive the file encryption key from the symmetric key and salt */
	SHA256_CTX kctx;
	sha256_init(&kctx);
	sha256_update(&kctx, symm_key, 0x20);
	sha256_update(&kctx, &prefix[0x10], 0x20);
	sha256_final(&kctx, enc_key);

	/* Fix: malloc(0) may legitimately return NULL for an empty list;
	 * request at least one byte so NULL always means out-of-memory. */
	payload = malloc(payload_len > 0 ? payload_len : 1);
	if(payload == NULL) {
		ERR("failed to allocate memory");
		goto err;
	}

	cur = data;
	uint8_t *ptr = payload;
	while(cur) {
		ptr = f->datawrite(cur, ptr);
		if(ptr == NULL) {
			goto err;
		}
		cur = *((void **) ((char*)cur + f->next_off));
	}
	if((uint64_t)(ptr - payload) != payload_len) {
		ERR("written length does not match expected");
		goto err;
	}

	chacha_enc(enc_key, 0x20, 0, payload, payload, payload_len);

	/* trailer MAC covers the prefix and the encrypted payload */
	HMAC_SHA256_CTX hctx;
	hmac_sha256_init(&hctx, hmac_key, 0x20);
	hmac_sha256_update(&hctx, prefix, pref_len);
	hmac_sha256_update(&hctx, payload, payload_len);
	uint8_t mac[0x20];
	hmac_sha256_final(&hctx, mac);

	if(fwrite(prefix, 1, pref_len, ff) != pref_len) {
		goto writerr;
	}
	if(payload_len > 0) {
		if(fwrite(payload, 1, payload_len, ff) != payload_len) {
			goto writerr;
		}
	}
	if(fwrite(mac, 1, 0x20, ff) != 0x20) {
		goto writerr;
	}

	ret = 0;
err:
	if(ff) fclose(ff);
	/* Fix: prefix was previously leaked on every path. */
	free(prefix);
	if(payload) zfree(payload, payload_len);
	memsets(enc_key, 0, sizeof(enc_key));
	memsets(symm_key, 0, sizeof(symm_key));
	memsets(hmac_key, 0, sizeof(hmac_key));
	return ret;
writerr:
	ERR("failed to write to file: %s", path);
	goto err;
}
/*
 * Read a file produced by write_datafile(): verify the prefix MAC, derive
 * the encryption key, verify the trailer MAC over prefix + payload, decrypt,
 * and rebuild the linked list through f->dataread (each element chained via
 * f->next_off).  *data receives the list head (NULL for an empty file).
 * Returns 0 on success, -1 on any failure.
 */
int read_datafile(char *path, void *arg, void **data, struct format_desc *f) {
	int ret = -1;

	uint8_t *payload = NULL;
	uint64_t payload_len = 0;
	uint64_t payload_num = 0;

	uint8_t *prefix = NULL;
	uint64_t pref_len = 0;

	uint8_t symm_key[0x20];
	uint8_t hmac_key[0x20];
	uint8_t enc_key[0x20];

	uint8_t mac1[0x20];
	uint8_t mac2c[0x20];
	uint8_t mac2f[0x20];

	FILE *ff = fopen(path, "rb");
	if(ff == NULL) {
		ERR("failed to open file for reading: %s", path);
		goto err;
	}

	pref_len = 0x50 + f->pref_len;
	prefix = malloc(pref_len);
	if(prefix == NULL) {
		ERR("failed to allocate memory");
		goto err;
	}

	if(fread(prefix, 1, pref_len, ff) != pref_len) {
		goto readerr;
	}

	payload_num = decbe64(&prefix[0]);
	/* NOTE(review): payload_len is read from the file header before any MAC
	 * check and passed to malloc unchecked; a corrupt or hostile file can
	 * request an arbitrarily large allocation -- consider a sanity bound. */
	payload_len = decbe64(&prefix[8]);

	if(f->s_key(arg, &prefix[0x30], symm_key) != 0) {
		goto err;
	}
	if(f->h_key(arg, &prefix[0x30], hmac_key) != 0) {
		goto err;
	}

	/* verify the prefix MAC (constant-time compare) */
	hmac_sha256(hmac_key, 0x20, prefix, pref_len - 0x20, mac1);
	if(memcmp_ct(mac1, &prefix[pref_len-0x20], 0x20) != 0) {
		ERR("invalid file");
		goto err;
	}

	/* derive the file encryption key from the symmetric key and salt */
	SHA256_CTX kctx;
	sha256_init(&kctx);
	sha256_update(&kctx, symm_key, 0x20);
	sha256_update(&kctx, &prefix[0x10], 0x20);
	sha256_final(&kctx, enc_key);

	/* Fix: malloc(0) may legitimately return NULL for an empty payload;
	 * request at least one byte so NULL always means out-of-memory. */
	payload = malloc(payload_len > 0 ? payload_len : 1);
	if(payload == NULL) {
		ERR("failed to allocate memory");
		goto err;
	}

	if(fread(payload, 1, payload_len, ff) != payload_len) {
		goto readerr;
	}
	if(fread(mac2f, 1, 0x20, ff) != 0x20) {
		goto readerr;
	}

	/* verify the trailer MAC before decrypting */
	HMAC_SHA256_CTX hctx;
	hmac_sha256_init(&hctx, hmac_key, 0x20);
	hmac_sha256_update(&hctx, prefix, pref_len);
	hmac_sha256_update(&hctx, payload, payload_len);
	hmac_sha256_final(&hctx, mac2c);
	if(memcmp_ct(mac2c, mac2f, 0x20) != 0) {
		ERR("invalid file");
		goto err;
	}

	chacha_dec(enc_key, 0x20, 0, payload, payload, payload_len);

	/* rebuild the linked list from the decrypted payload */
	void **cur = data;
	uint8_t *ptr = payload;
	uint64_t i;
	for(i = 0; (uint64_t)(ptr - payload) < payload_len && i < payload_num; i++) {
		ptr = f->dataread(cur, arg, ptr);
		if(ptr == NULL) {
			goto err;
		}
		cur = (void **) ((char*)(*cur) + f->next_off);
	}
	*cur = NULL;

	if(i != payload_num) {
		ERR("read num does not match expected");
		goto err;
	}
	if((uint64_t)(ptr - payload) != payload_len) {
		ERR("read length does not match expected");
		goto err;
	}

	ret = 0;
err:
	if(ff) fclose(ff);
	/* Fix: prefix was previously leaked on every path. */
	free(prefix);
	if(payload) zfree(payload, payload_len);
	memsets(enc_key, 0, sizeof(enc_key));
	memsets(symm_key, 0, sizeof(symm_key));
	memsets(hmac_key, 0, sizeof(hmac_key));
	return ret;
readerr:
	ERR("failed to read from file: %s", path);
	goto err;
}
| iburinoc/ibchat | client/datafile.c | C | mit | 5,238 |
/**
* @file main.c
* @brief Main routine
*
* @section License
*
* Copyright (C) 2010-2015 Oryx Embedded SARL. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* @author Oryx Embedded SARL (www.oryx-embedded.com)
* @version 1.6.4
**/
//Dependencies
#include <stdlib.h>
#include "stm32f4xx.h"
#include "stm32f4_discovery.h"
#include "stm32f4_discovery_lcd.h"
#include "os_port.h"
#include "core/net.h"
#include "drivers/stm32f4x7_eth.h"
#include "drivers/lan8720.h"
#include "dhcp/dhcp_client.h"
#include "ipv6/slaac.h"
#include "smtp/smtp_client.h"
#include "yarrow.h"
#include "error.h"
#include "debug.h"
//Application configuration
#define APP_MAC_ADDR "00-AB-CD-EF-04-07"
#define APP_USE_DHCP ENABLED
#define APP_IPV4_HOST_ADDR "192.168.0.20"
#define APP_IPV4_SUBNET_MASK "255.255.255.0"
#define APP_IPV4_DEFAULT_GATEWAY "192.168.0.254"
#define APP_IPV4_PRIMARY_DNS "8.8.8.8"
#define APP_IPV4_SECONDARY_DNS "8.8.4.4"
#define APP_USE_SLAAC ENABLED
#define APP_IPV6_LINK_LOCAL_ADDR "fe80::407"
#define APP_IPV6_PREFIX "2001:db8::"
#define APP_IPV6_PREFIX_LENGTH 64
#define APP_IPV6_GLOBAL_ADDR "2001:db8::407"
#define APP_IPV6_ROUTER "fe80::1"
#define APP_IPV6_PRIMARY_DNS "2001:4860:4860::8888"
#define APP_IPV6_SECONDARY_DNS "2001:4860:4860::8844"
//Global variables
uint_t lcdLine = 0;
uint_t lcdColumn = 0;
DhcpClientSettings dhcpClientSettings;
DhcpClientCtx dhcpClientContext;
SlaacSettings slaacSettings;
SlaacContext slaacContext;
YarrowContext yarrowContext;
uint8_t seed[32];
/**
* @brief Set cursor location
* @param[in] line Line number
* @param[in] column Column number
**/
/**
 * @brief Set cursor location
 * @param[in] line Line number (0..9 visible)
 * @param[in] column Column number (0..19 visible)
 *
 * Values are clamped to one past the last valid cell; lcdPutChar() treats
 * the sentinel values (line 10 / column 20) as off-screen and skips drawing.
 **/
void lcdSetCursor(uint_t line, uint_t column)
{
   lcdLine = (line < 10) ? line : 10;
   lcdColumn = (column < 20) ? column : 20;
}
/**
* @brief Write a character to the LCD display
* @param[in] c Character to be written
**/
/**
 * @brief Write a character to the LCD display
 * @param[in] c Character to be written
 *
 * '\r' returns to column 0; '\n' additionally advances to the next line.
 * Printable characters are drawn only while the cursor is on-screen
 * (line < 10, column < 20); the cursor wraps at the end of a line.
 **/
void lcdPutChar(char_t c)
{
   switch(c)
   {
   case '\r':
      lcdColumn = 0;
      break;
   case '\n':
      lcdColumn = 0;
      lcdLine++;
      break;
   default:
      if(lcdLine < 10 && lcdColumn < 20)
      {
         //Display current character (cell size is 16x24 pixels)
         LCD_DisplayChar(lcdLine * 24, lcdColumn * 16, c);

         //Advance the cursor position, wrapping at end of line
         lcdColumn++;
         if(lcdColumn >= 20)
         {
            lcdColumn = 0;
            lcdLine++;
         }
      }
      break;
   }
}
/**
* @brief I/O initialization
**/
/**
 * @brief I/O initialization
 *
 * Configures the four discovery-board LEDs (cleared) and the user button,
 * then performs a hard reset of the Ethernet PHY via PE2: the line is
 * driven low for 10 ms and released, followed by a 10 ms settle delay.
 **/
void ioInit(void)
{
   GPIO_InitTypeDef GPIO_InitStructure;

   //LED configuration
   STM_EVAL_LEDInit(LED3);
   STM_EVAL_LEDInit(LED4);
   STM_EVAL_LEDInit(LED5);
   STM_EVAL_LEDInit(LED6);

   //Clear LEDs
   STM_EVAL_LEDOff(LED3);
   STM_EVAL_LEDOff(LED4);
   STM_EVAL_LEDOff(LED5);
   STM_EVAL_LEDOff(LED6);

   //Initialize user button
   STM_EVAL_PBInit(BUTTON_USER, BUTTON_MODE_GPIO);

   //Enable GPIOE clock
   RCC_AHB1PeriphClockCmd(RCC_AHB1Periph_GPIOE, ENABLE);

   //Configure PE2 (PHY_RST) pin as an output
   GPIO_InitStructure.GPIO_Pin = GPIO_Pin_2;
   GPIO_InitStructure.GPIO_Mode = GPIO_Mode_OUT;
   GPIO_InitStructure.GPIO_OType = GPIO_OType_PP;
   GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_NOPULL;
   GPIO_InitStructure.GPIO_Speed = GPIO_Speed_50MHz;
   GPIO_Init(GPIOE, &GPIO_InitStructure);

   //Reset PHY transceiver (hard reset): 10 ms low pulse, 10 ms settle
   GPIO_ResetBits(GPIOE, GPIO_Pin_2);
   sleep(10);
   GPIO_SetBits(GPIOE, GPIO_Pin_2);
   sleep(10);
}
/**
* @brief SMTP client test routine
* @return Error code
**/
/**
 * @brief SMTP client test routine
 * @return Error code
 *
 * Sends a fixed demo mail to two recipients through smtp.gmail.com using
 * the Yarrow PRNG context for TLS randomness.
 *
 * NOTE(review): the user name/password below are placeholders, and port 25
 * with useStartTls = FALSE (per the field comment, meaning implicit TLS)
 * looks inconsistent with Gmail's requirements (587/STARTTLS or 465/implicit
 * TLS) -- confirm before real use.
 **/
error_t smtpClientTest(void)
{
   error_t error;

   //Authentication information (static: persists across calls)
   static SmtpAuthInfo authInfo =
   {
      NULL,             //Network interface
      "smtp.gmail.com", //SMTP server name
      25,               //SMTP server port
      "username",       //User name
      "password",       //Password
      FALSE,            //Use STARTTLS rather than implicit TLS
      YARROW_PRNG_ALGO, //PRNG algorithm
      &yarrowContext    //PRNG context
   };

   //Recipients
   static SmtpMailAddr recipients[2] =
   {
      {"Alice", "alice@example.com", SMTP_RCPT_TYPE_TO}, //First recipient
      {"Bob", "bob@example.com", SMTP_RCPT_TYPE_CC}      //Second recipient
   };

   //Mail contents
   static SmtpMail mail =
   {
      {"Charlie", "charlie@gmail.com"}, //From
      recipients,                       //Recipients
      2,                                //Recipient count
      "",                               //Date
      "SMTP Client Demo",               //Subject
      "Hello World!"                    //Body
   };

   //Send mail
   error = smtpSendMail(&authInfo, &mail);

   //Return status code
   return error;
}
/**
* @brief User task
**/
/**
 * @brief User task
 *
 * Draws the static captions once, then loops: refreshes the current IPv4
 * address on the LCD and, when the user button is pressed, runs the SMTP
 * test and waits for the button to be released.
 **/
void userTask(void *param)
{
   char_t addrStr[40];

   //Point to the network interface
   NetInterface *interface = &netInterface[0];

   //Static captions
   lcdSetCursor(2, 0);
   printf("IPv4 Addr\r\n");
   lcdSetCursor(5, 0);
   printf("Press user button\r\nto run test\r\n");

   //Endless loop
   for(;;)
   {
      //Refresh the displayed IPv4 host address
      lcdSetCursor(3, 0);
      printf("%-16s\r\n", ipv4AddrToString(interface->ipv4Config.addr, addrStr));

      //User button pressed?
      if(STM_EVAL_PBGetState(BUTTON_USER))
      {
         //SMTP client test routine
         smtpClientTest();

         //Debounce: wait for the user button to be released
         while(STM_EVAL_PBGetState(BUTTON_USER))
         {
         }
      }

      //Loop delay
      osDelayTask(100);
   }
}
/**
* @brief LED blinking task
**/
/**
 * @brief LED blinking task
 *
 * Blinks LED4 at 1 Hz: 100 ms on, 900 ms off.
 **/
void blinkTask(void *parameters)
{
   for(;;)
   {
      STM_EVAL_LEDOn(LED4);
      osDelayTask(100);
      STM_EVAL_LEDOff(LED4);
      osDelayTask(900);
   }
}
/**
* @brief Main entry point
* @return Unused value
**/
/**
 * @brief Main entry point
 * @return Unused value (osStartKernel() does not return)
 *
 * Boot sequence: OS kernel -> debug UART -> board I/O -> LCD -> hardware
 * RNG seed -> Yarrow PRNG -> TCP/IP stack -> Ethernet interface (DHCP or
 * static IPv4, SLAAC or static IPv6, selected by the APP_* macros) ->
 * user and blink tasks -> scheduler.
 **/
int_t main(void)
{
   error_t error;
   uint_t i;
   uint32_t value;
   NetInterface *interface;
   OsTask *task;
   MacAddr macAddr;
#if (APP_USE_DHCP == DISABLED)
   Ipv4Addr ipv4Addr;
#endif
#if (APP_USE_SLAAC == DISABLED)
   Ipv6Addr ipv6Addr;
#endif

   //Initialize kernel
   osInitKernel();
   //Configure debug UART
   debugInit(115200);

   //Start-up message
   TRACE_INFO("\r\n");
   TRACE_INFO("***********************************\r\n");
   TRACE_INFO("*** CycloneTCP SMTP Client Demo ***\r\n");
   TRACE_INFO("***********************************\r\n");
   TRACE_INFO("Copyright: 2010-2015 Oryx Embedded SARL\r\n");
   TRACE_INFO("Compiled: %s %s\r\n", __DATE__, __TIME__);
   TRACE_INFO("Target: STM32F407\r\n");
   TRACE_INFO("\r\n");

   //Configure I/Os
   ioInit();

   //Initialize LCD display
   STM32f4_Discovery_LCD_Init();
   LCD_SetBackColor(Blue);
   LCD_SetTextColor(White);
   LCD_SetFont(&Font16x24);
   LCD_Clear(Blue);

   //Welcome message
   lcdSetCursor(0, 0);
   printf("SMTP Client Demo\r\n");

   //Enable RNG peripheral clock
   RCC_AHB2PeriphClockCmd(RCC_AHB2Periph_RNG, ENABLE);
   //Enable RNG
   RNG_Cmd(ENABLE);

   //Generate a random seed (32 bytes, 4 bytes per hardware RNG word)
   for(i = 0; i < 32; i += 4)
   {
      //Wait for the RNG to contain a valid data
      while(RNG_GetFlagStatus(RNG_FLAG_DRDY) == RESET);
      //Get 32-bit random value
      value = RNG_GetRandomNumber();

      //Copy random value (little-endian byte split)
      seed[i] = value & 0xFF;
      seed[i + 1] = (value >> 8) & 0xFF;
      seed[i + 2] = (value >> 16) & 0xFF;
      seed[i + 3] = (value >> 24) & 0xFF;
   }

   //PRNG initialization
   error = yarrowInit(&yarrowContext);
   //Any error to report?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to initialize PRNG!\r\n");
   }

   //Properly seed the PRNG
   error = yarrowSeed(&yarrowContext, seed, sizeof(seed));
   //Any error to report?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to seed PRNG!\r\n");
   }

   //TCP/IP stack initialization
   error = netInit();
   //Any error to report?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to initialize TCP/IP stack!\r\n");
   }

   //Configure the first Ethernet interface
   interface = &netInterface[0];

   //Set interface name
   netSetInterfaceName(interface, "eth0");
   //Set host name
   netSetHostname(interface, "SMTPClientDemo");
   //Select the relevant network adapter
   netSetDriver(interface, &stm32f4x7EthDriver);
   netSetPhyDriver(interface, &lan8720PhyDriver);
   //Set host MAC address
   macStringToAddr(APP_MAC_ADDR, &macAddr);
   netSetMacAddr(interface, &macAddr);

   //Initialize network interface
   error = netConfigInterface(interface);
   //Any error to report?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to configure interface %s!\r\n", interface->name);
   }

#if (IPV4_SUPPORT == ENABLED)
#if (APP_USE_DHCP == ENABLED)
   //Get default settings
   dhcpClientGetDefaultSettings(&dhcpClientSettings);
   //Set the network interface to be configured by DHCP
   dhcpClientSettings.interface = interface;
   //Disable rapid commit option
   dhcpClientSettings.rapidCommit = FALSE;

   //DHCP client initialization
   error = dhcpClientInit(&dhcpClientContext, &dhcpClientSettings);
   //Failed to initialize DHCP client?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to initialize DHCP client!\r\n");
   }

   //Start DHCP client
   error = dhcpClientStart(&dhcpClientContext);
   //Failed to start DHCP client?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to start DHCP client!\r\n");
   }
#else
   //Set IPv4 host address
   ipv4StringToAddr(APP_IPV4_HOST_ADDR, &ipv4Addr);
   ipv4SetHostAddr(interface, ipv4Addr);

   //Set subnet mask
   ipv4StringToAddr(APP_IPV4_SUBNET_MASK, &ipv4Addr);
   ipv4SetSubnetMask(interface, ipv4Addr);

   //Set default gateway
   ipv4StringToAddr(APP_IPV4_DEFAULT_GATEWAY, &ipv4Addr);
   ipv4SetDefaultGateway(interface, ipv4Addr);

   //Set primary and secondary DNS servers
   ipv4StringToAddr(APP_IPV4_PRIMARY_DNS, &ipv4Addr);
   ipv4SetDnsServer(interface, 0, ipv4Addr);
   ipv4StringToAddr(APP_IPV4_SECONDARY_DNS, &ipv4Addr);
   ipv4SetDnsServer(interface, 1, ipv4Addr);
#endif
#endif

#if (IPV6_SUPPORT == ENABLED)
#if (APP_USE_SLAAC == ENABLED)
   //Get default settings
   slaacGetDefaultSettings(&slaacSettings);
   //Set the network interface to be configured
   slaacSettings.interface = interface;

   //SLAAC initialization
   error = slaacInit(&slaacContext, &slaacSettings);
   //Failed to initialize SLAAC?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to initialize SLAAC!\r\n");
   }

   //Start IPv6 address autoconfiguration process
   error = slaacStart(&slaacContext);
   //Failed to start SLAAC process?
   if(error)
   {
      //Debug message
      TRACE_ERROR("Failed to start SLAAC!\r\n");
   }
#else
   //Set link-local address
   ipv6StringToAddr(APP_IPV6_LINK_LOCAL_ADDR, &ipv6Addr);
   ipv6SetLinkLocalAddr(interface, &ipv6Addr);

   //Set IPv6 prefix
   ipv6StringToAddr(APP_IPV6_PREFIX, &ipv6Addr);
   ipv6SetPrefix(interface, &ipv6Addr, APP_IPV6_PREFIX_LENGTH);

   //Set global address
   ipv6StringToAddr(APP_IPV6_GLOBAL_ADDR, &ipv6Addr);
   ipv6SetGlobalAddr(interface, &ipv6Addr);

   //Set router
   ipv6StringToAddr(APP_IPV6_ROUTER, &ipv6Addr);
   ipv6SetRouter(interface, &ipv6Addr);

   //Set primary and secondary DNS servers
   ipv6StringToAddr(APP_IPV6_PRIMARY_DNS, &ipv6Addr);
   ipv6SetDnsServer(interface, 0, &ipv6Addr);
   ipv6StringToAddr(APP_IPV6_SECONDARY_DNS, &ipv6Addr);
   ipv6SetDnsServer(interface, 1, &ipv6Addr);
#endif
#endif

   //Create user task
   task = osCreateTask("User Task", userTask, NULL, 800, 1);
   //Failed to create the task?
   if(task == OS_INVALID_HANDLE)
   {
      //Debug message
      TRACE_ERROR("Failed to create task!\r\n");
   }

   //Create a task to blink the LED
   task = osCreateTask("Blink", blinkTask, NULL, 500, 1);
   //Failed to create the task?
   if(task == OS_INVALID_HANDLE)
   {
      //Debug message
      TRACE_ERROR("Failed to create task!\r\n");
   }

   //Start the execution of tasks
   osStartKernel();

   //This function should never return
   return 0;
}
| miragecentury/M2_SE_RTOS_Project | Project/LPC1549_Keil/CycloneTCP_SSL_Crypto_Open_1_6_4/demo/st/stm32f4_discovery/smtp_client_demo/src/main.c | C | mit | 12,503 |
#include <assert.h>
#include <SDL2/SDL.h>
#include <SDL2/SDL_ttf.h>
#include <video/gl.h>
#include <xxhash.h>
#include <memtrack.h>
#include "base/stack.h"
#include "core/common.h"
#include "base/math_ext.h"
#include "core/asset.h"
#include "core/configs.h"
#include "core/frame.h"
#include "core/logerr.h"
#include <core/application.h>
#include "core/video.h"
#include "video/resources_detail.h"
#include <core/audio.h>
static int running = 1;
static STACK *states_stack;
static APP_STATE *allstates;
static size_t states_num;
/* Push a new application state onto the stack by index into allstates[]
 * and run its on_init hook; resets frame timing afterwards.
 * Exits the process if the index is out of range. */
NEON_API void
application_next_state(unsigned int state) {
    /* Fix: `state > states_num` let state == states_num through, indexing one
     * past the end of allstates[] (valid indices are 0 .. states_num - 1).
     * Also use %u for the unsigned argument. */
    if (state >= states_num) {
        LOG_ERROR("State(%u) out of range", state);
        exit(EXIT_FAILURE);
    }

    push_stack(states_stack, &allstates[state]);
    ((APP_STATE*)top_stack(states_stack))->on_init();
    frame_flush();
}
NEON_API void
application_back_state(void) {
((APP_STATE*)pop_stack(states_stack))->on_cleanup();
frame_flush();
}
/* atexit handler: shut subsystems down, unwind any states still on the
 * stack, then release the stack itself. */
static void
application_cleanup(void) {
    configs_cleanup();
    asset_close();
    audio_cleanup();
    video_cleanup();

    while (!is_stack_empty(states_stack)) {
        application_back_state();
    }
    delete_stack(states_stack);
}
#ifdef OPENAL_BACKEND
#define SDL_INIT_FLAGS (SDL_INIT_VIDEO | SDL_INIT_TIMER)
#else
#define SDL_INIT_FLAGS (SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)
#endif
/*
 * Run the application: initialise SDL, SDL_ttf, the states stack, video and
 * audio; push the initial state (index 0); then run a fixed-timestep
 * (TIMESTEP) update loop with interpolated rendering until
 * application_quit() clears `running`.  Returns EXIT_SUCCESS, or
 * EXIT_FAILURE if initialisation fails.
 *
 * NOTE(review): SDL_INIT_FLAGS is defined above but SDL_Init is called with
 * SDL_INIT_EVERYTHING -- the macro was presumably meant to be used here;
 * confirm the intended subsystem set.
 */
NEON_API int
application_exec(const char *title, APP_STATE *states, size_t states_n) {
    allstates = states;
    states_num = states_n;

    if (SDL_Init(SDL_INIT_EVERYTHING) < 0) {
        LOG_ERROR("%s\n", SDL_GetError());
        return EXIT_FAILURE;
    }
    atexit(SDL_Quit);

    if (TTF_Init() < 0) {
        LOG_ERROR("%s\n", TTF_GetError());
        return EXIT_FAILURE;
    }
    atexit(TTF_Quit);

    // one extra slot so every state plus an initial entry fits
    if ((states_stack = new_stack(sizeof(APP_STATE), states_n + 1)) == NULL) {
        LOG_ERROR("%s\n", "Can\'t create game states stack");
        return EXIT_FAILURE;
    }

    LOG("%s launched...\n", title);
    LOG("Platform: %s\n", SDL_GetPlatform());

    video_init(title);
    audio_init();

    atexit(application_cleanup);

    application_next_state(0);
    if (is_stack_empty(states_stack)) {
        LOG_CRITICAL("%s\n", "No game states");
        exit(EXIT_FAILURE);
    }

    SDL_Event event;
    Uint64 current = 0;
    Uint64 last = 0;
    float accumulator = 0.0f;  // leftover simulation time, in seconds

    while(running) {
        frame_begin();

        // dispatch all pending input events to the active state
        while(SDL_PollEvent(&event)) {
            ((APP_STATE*)top_stack(states_stack))->on_event(&event);
        }

        asset_process();
        resources_process();

        // wall-clock delta since last frame (double math, stored as float)
        last = current;
        current = SDL_GetPerformanceCounter();
        Uint64 freq = SDL_GetPerformanceFrequency();
        float delta = (double)(current - last) / (double)freq;

        // clamp to 200 ms to avoid a spiral of death after stalls
        accumulator += CLAMP(delta, 0.f, 0.2f);

        // fixed-timestep updates; render interpolates the remainder
        while(accumulator >= TIMESTEP) {
            accumulator -= TIMESTEP;
            ((APP_STATE*)top_stack(states_stack))->on_update(TIMESTEP);
        }

        ((APP_STATE*)top_stack(states_stack))->on_present(screen.width, screen.height, accumulator / TIMESTEP);
        video_swap_buffers();

        frame_end();
        SDL_Delay(1);
    }

    return EXIT_SUCCESS;
}
/* Request shutdown: clearing `running` makes the main loop in
 * application_exec() terminate after the current frame. */
NEON_API void
application_quit(void) {
    running = 0;
}
| m1nuz/neon-core | neon/src/core/application.c | C | mit | 3,306 |
//
// MultibandBank.c
// FxDSP
//
// Created by Hamilton Kibbe on 11/24/13.
// Copyright (c) 2013 Hamilton Kibbe. All rights reserved.
//
#include "MultibandBank.h"
#include "LinkwitzRileyFilter.h"
#include "RBJFilter.h"
#include "FilterTypes.h"
#include <stdlib.h>
// Sqrt(2)/2
#define FILT_Q (0.70710681186548)
/*******************************************************************************
 MultibandFilter */
/* Three-way crossover: two Linkwitz-Riley splits (at lowCutoff and
 * highCutoff, see MultibandFilterInit) plus an RBJ all-pass for phase
 * compensation. */
struct MultibandFilter
{
    LRFilter*   LPA;        /* low-pass at lowCutoff */
    LRFilter*   HPA;        /* high-pass at lowCutoff */
    LRFilter*   LPB;        /* low-pass at highCutoff */
    LRFilter*   HPB;        /* high-pass at highCutoff */
    RBJFilter*  APF;        /* all-pass at Nyquist (sampleRate/2), Q = 0.5 */
    float       lowCutoff;  /* low/mid crossover frequency [Hz] */
    float       highCutoff; /* mid/high crossover frequency [Hz] */
    float       sampleRate;
};

/* Double-precision variant of MultibandFilter. */
struct MultibandFilterD
{
    LRFilterD*  LPA;
    LRFilterD*  HPA;
    LRFilterD*  LPB;
    LRFilterD*  HPB;
    RBJFilterD* APF;
    double      lowCutoff;
    double      highCutoff;
    double      sampleRate;
};
/*******************************************************************************
MultibandFilterInit */
/* Allocate and initialise a three-band crossover filter.
 * lowCutoff/highCutoff are the crossover frequencies in Hz.
 * Returns NULL on allocation failure (fix: the malloc result was
 * previously used unchecked). */
MultibandFilter*
MultibandFilterInit(float lowCutoff,
                    float highCutoff,
                    float sampleRate)
{
    MultibandFilter* filter = malloc(sizeof *filter);
    if (filter == NULL)
    {
        return NULL;
    }
    filter->lowCutoff = lowCutoff;
    filter->highCutoff = highCutoff;
    filter->sampleRate = sampleRate;
    /* NOTE(review): the stage initialisers below also allocate and their
     * results are not checked (pre-existing behavior) -- confirm whether
     * LRFilterInit/RBJFilterInit can fail in this codebase. */
    filter->LPA = LRFilterInit(LOWPASS, filter->lowCutoff, FILT_Q, filter->sampleRate);
    filter->HPA = LRFilterInit(HIGHPASS, filter->lowCutoff, FILT_Q, filter->sampleRate);
    filter->LPB = LRFilterInit(LOWPASS, filter->highCutoff, FILT_Q, filter->sampleRate);
    filter->HPB = LRFilterInit(HIGHPASS, filter->highCutoff, FILT_Q, filter->sampleRate);
    filter->APF = RBJFilterInit(ALLPASS, filter->sampleRate/2.0, filter->sampleRate);
    RBJFilterSetQ(filter->APF, 0.5);
    return filter;
}
/* Double-precision variant of MultibandFilterInit.
 * Returns NULL on allocation failure (fix: the malloc result was
 * previously used unchecked). */
MultibandFilterD*
MultibandFilterInitD(double lowCutoff,
                     double highCutoff,
                     double sampleRate)
{
    MultibandFilterD* filter = malloc(sizeof *filter);
    if (filter == NULL)
    {
        return NULL;
    }
    filter->lowCutoff = lowCutoff;
    filter->highCutoff = highCutoff;
    filter->sampleRate = sampleRate;
    /* NOTE(review): stage initialiser results are not checked
     * (pre-existing behavior). */
    filter->LPA = LRFilterInitD(LOWPASS, filter->lowCutoff, FILT_Q, filter->sampleRate);
    filter->HPA = LRFilterInitD(HIGHPASS, filter->lowCutoff, FILT_Q, filter->sampleRate);
    filter->LPB = LRFilterInitD(LOWPASS, filter->highCutoff, FILT_Q, filter->sampleRate);
    filter->HPB = LRFilterInitD(HIGHPASS, filter->highCutoff, FILT_Q, filter->sampleRate);
    filter->APF = RBJFilterInitD(ALLPASS, filter->sampleRate/2.0, filter->sampleRate);
    RBJFilterSetQD(filter->APF, 0.5);
    return filter;
}
/*******************************************************************************
 MultibandFilterFree */
/* Release a filter created with MultibandFilterInit().  Safe on NULL.
 * Bug fix: the original dereferenced `filter` to free the sub-filters
 * BEFORE the NULL check, so MultibandFilterFree(NULL) crashed. */
Error_t
MultibandFilterFree(MultibandFilter* filter)
{
    if (filter)
    {
        LRFilterFree(filter->LPA);
        LRFilterFree(filter->LPB);
        LRFilterFree(filter->HPA);
        LRFilterFree(filter->HPB);
        RBJFilterFree(filter->APF);
        free(filter);
    }
    return NOERR;
}
/* Double-precision variant of MultibandFilterFree().  Safe on NULL.
 * Bug fix: sub-filters are now freed only after the NULL check. */
Error_t
MultibandFilterFreeD(MultibandFilterD* filter)
{
    if (filter)
    {
        LRFilterFreeD(filter->LPA);
        LRFilterFreeD(filter->LPB);
        LRFilterFreeD(filter->HPA);
        LRFilterFreeD(filter->HPB);
        RBJFilterFreeD(filter->APF);
        free(filter);
    }
    return NOERR;
}
/*******************************************************************************
 MultibandFilterFlush */
/* Zero the internal state (delay lines) of every sub-filter so the next
 * process call starts from silence.  Always returns NOERR. */
Error_t
MultibandFilterFlush(MultibandFilter* filter)
{
    LRFilterFlush(filter->LPA);
    LRFilterFlush(filter->LPB);
    LRFilterFlush(filter->HPA);
    LRFilterFlush(filter->HPB);
    RBJFilterFlush(filter->APF);
    return NOERR;
}
/* Double-precision variant of MultibandFilterFlush(). */
Error_t
MultibandFilterFlushD(MultibandFilterD* filter)
{
    LRFilterFlushD(filter->LPA);
    LRFilterFlushD(filter->LPB);
    LRFilterFlushD(filter->HPA);
    LRFilterFlushD(filter->HPB);
    RBJFilterFlushD(filter->APF);
    return NOERR;
}
/*******************************************************************************
 MultibandFilterSetLowCutoff */
/* Move the low/mid crossover point (Hz): retunes the LPA/HPA
 * Linkwitz-Riley pair.  Always returns NOERR. */
Error_t
MultibandFilterSetLowCutoff(MultibandFilter* filter, float lowCutoff)
{
    filter->lowCutoff = lowCutoff;
    LRFilterSetParams(filter->LPA, LOWPASS, lowCutoff, FILT_Q);
    LRFilterSetParams(filter->HPA, HIGHPASS, lowCutoff, FILT_Q);
    return NOERR;
}
/* Double-precision variant of MultibandFilterSetLowCutoff(). */
Error_t
MultibandFilterSetLowCutoffD(MultibandFilterD* filter, double lowCutoff)
{
    filter->lowCutoff = lowCutoff;
    LRFilterSetParamsD(filter->LPA, LOWPASS, lowCutoff, FILT_Q);
    LRFilterSetParamsD(filter->HPA, HIGHPASS, lowCutoff, FILT_Q);
    return NOERR;
}
/*******************************************************************************
 MultibandFilterSetHighCutoff */
/* Move the mid/high crossover point (Hz): retunes the LPB/HPB
 * Linkwitz-Riley pair.  Always returns NOERR. */
Error_t
MultibandFilterSetHighCutoff(MultibandFilter* filter, float highCutoff)
{
    filter->highCutoff = highCutoff;
    LRFilterSetParams(filter->LPB, LOWPASS, highCutoff, FILT_Q);
    LRFilterSetParams(filter->HPB, HIGHPASS, highCutoff, FILT_Q);
    return NOERR;
}
/* Double-precision variant of MultibandFilterSetHighCutoff(). */
Error_t
MultibandFilterSetHighCutoffD(MultibandFilterD* filter, double highCutoff)
{
    filter->highCutoff = highCutoff;
    LRFilterSetParamsD(filter->LPB, LOWPASS, highCutoff, FILT_Q);
    LRFilterSetParamsD(filter->HPB, HIGHPASS, highCutoff, FILT_Q);
    return NOERR;
}
/*******************************************************************************
 MultibandFilterUpdate */
/* Retune both crossover points in one call (equivalent to calling
 * SetLowCutoff then SetHighCutoff).  Always returns NOERR. */
Error_t
MultibandFilterUpdate(MultibandFilter* filter,
                      float            lowCutoff,
                      float            highCutoff)
{
    filter->lowCutoff = lowCutoff;
    filter->highCutoff = highCutoff;
    LRFilterSetParams(filter->LPA, LOWPASS, lowCutoff, FILT_Q);
    LRFilterSetParams(filter->HPA, HIGHPASS, lowCutoff, FILT_Q);
    LRFilterSetParams(filter->LPB, LOWPASS, highCutoff, FILT_Q);
    LRFilterSetParams(filter->HPB, HIGHPASS, highCutoff, FILT_Q);
    return NOERR;
}
/* Double-precision variant of MultibandFilterUpdate(). */
Error_t
MultibandFilterUpdateD(MultibandFilterD*    filter,
                       double               lowCutoff,
                       double               highCutoff)
{
    filter->lowCutoff = lowCutoff;
    filter->highCutoff = highCutoff;
    LRFilterSetParamsD(filter->LPA, LOWPASS, lowCutoff, FILT_Q);
    LRFilterSetParamsD(filter->HPA, HIGHPASS, lowCutoff, FILT_Q);
    LRFilterSetParamsD(filter->LPB, LOWPASS, highCutoff, FILT_Q);
    LRFilterSetParamsD(filter->HPB, HIGHPASS, highCutoff, FILT_Q);
    return NOERR;
}
/*******************************************************************************
 MultibandFilterProcess */
/* Split n_samples of inBuffer into three band outputs; each output buffer
 * must hold at least n_samples floats.  The allpass on the low band keeps
 * it phase-aligned with the mid/high path, which passes through two
 * filter stages.
 * NOTE(review): tempLow/tempHi are VLAs sized by the caller-supplied
 * n_samples — a very large count can overflow the stack.  Consider heap
 * scratch buffers; needs the library's error constant to report failure. */
Error_t
MultibandFilterProcess(MultibandFilter* filter,
                       float*           lowOut,
                       float*           midOut,
                       float*           highOut,
                       const float*     inBuffer,
                       unsigned         n_samples)
{
    float tempLow[n_samples];
    float tempHi[n_samples];
    LRFilterProcess(filter->LPA, tempLow, inBuffer, n_samples);
    LRFilterProcess(filter->HPA, tempHi, inBuffer, n_samples);
    RBJFilterProcess(filter->APF, lowOut, tempLow, n_samples);
    LRFilterProcess(filter->LPB, midOut, tempHi, n_samples);
    LRFilterProcess(filter->HPB, highOut, tempHi, n_samples);
    return NOERR;
}
/* Double-precision variant of MultibandFilterProcess(); same VLA
 * stack-usage caveat applies (doubly so, the scratch arrays are twice
 * the size). */
Error_t
MultibandFilterProcessD(MultibandFilterD*   filter,
                        double*             lowOut,
                        double*             midOut,
                        double*             highOut,
                        const double*       inBuffer,
                        unsigned            n_samples)
{
    double tempLow[n_samples];
    double tempHi[n_samples];
    LRFilterProcessD(filter->LPA, tempLow, inBuffer, n_samples);
    LRFilterProcessD(filter->HPA, tempHi, inBuffer, n_samples);
    RBJFilterProcessD(filter->APF, lowOut, tempLow, n_samples);
    LRFilterProcessD(filter->LPB, midOut, tempHi, n_samples);
    LRFilterProcessD(filter->HPB, highOut, tempHi, n_samples);
    return NOERR;
}
| hamiltonkibbe/FxDSP | FxDSP/src/MultibandBank.c | C | mit | 8,273 |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
/*
 * Process-tree demo: A forks B; B forks C and D.
 *   A: reads a sentence, counts vowels, sends it to B over pipe p1.
 *   B: forwards it to D over pipe p2, counts upper/lower-case letters.
 *   C: prints B's pid (its parent) and its own pid.
 *   D: prints the sentence reversed.
 *
 * Bug fixes vs. the original:
 *   - `if(pid1=fork()==0)` parses as `pid1 = (fork() == 0)`, so pid1/pid2/
 *     pid3 held 0 or 1 instead of child pids and the waitpid() calls
 *     targeted the wrong processes.  Parenthesised: `(pid1 = fork()) == 0`.
 *   - unbounded gets() replaced with fgets() + newline strip.
 *   - unchecked pipe() calls now checked; unused pid4 removed.
 */
int main(){
	int pid1,pid2,pid3;
	int p1[2],p2[2];
	char bufr[30],rev[30];
	int countL=0,countU=0,i=-1,j=0,countV=0,len;
	if (pipe(p1) == -1 || pipe(p2) == -1){
		perror("pipe");
		return 1;
	}
	if((pid1=fork())==0){
		if((pid2=fork())==0){
			/* Process D: reverse the sentence received from B. */
			read(p2[0],bufr,sizeof(bufr));
			len=strlen(bufr);
			for(i=len-1,j=0;j<len;i--,j++)
				rev[j]=bufr[i];
			rev[j]='\0';
			printf("Proccess D---- Reverse = %s \n",rev);
			exit(1);
		}
		else{
			/* Process B: relay A's sentence to D, then count cases. */
			read(p1[0],bufr,sizeof(bufr));
			write(p2[1],bufr,sizeof(bufr));
			if((pid3=fork())==0){
				/* Process C: report the B/C pid pair. */
				printf("Poccess C--- ID of B = %d and ID of C = %d \n",getppid(),getpid());
				exit(1);
			}
			else{
				while(bufr[++i]!='\0')
					if(bufr[i]>='A' && bufr[i]<='Z')
						countU++;
				i=-1;
				while(bufr[++i]!='\0')
					if(bufr[i]>='a' && bufr[i]<='z')
						countL++;
				printf("Poccess B--- No of UpperCase letters = %d \n",countU);
				printf("Poccess B--- No of LowerCase letters = %d \n",countL);
				waitpid(pid2,NULL,0);
				waitpid(pid3,NULL,0);
			}
		}
		exit(1);
	}
	else{
		/* Process A: read input, send it to B, count vowels. */
		printf("Poccess A--- Enter a sentence ");
		if (fgets(bufr, sizeof(bufr), stdin) == NULL)
			bufr[0] = '\0';
		bufr[strcspn(bufr, "\n")] = '\0';
		write(p1[1],bufr,sizeof(bufr));
		while(bufr[++i]!='\0')
			if(bufr[i]=='a' || bufr[i]=='e' || bufr[i]=='i' || bufr[i]=='o' || bufr[i]=='u' ||
			   bufr[i]=='A' || bufr[i]=='E' || bufr[i]=='I' || bufr[i]=='O' || bufr[i]=='U' )
				countV++;
		printf("Poccess A--- No of Vowels = %d \n",countV);
		waitpid(pid1,NULL,0);
	}
	close(p1[0]);
	close(p1[1]);
	return 0;
}
| CSE-SOE-CUSAT/NOSLab | csb/extras/a-b-c-d-pipe.c | C | mit | 1,716 |
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* get_next_line.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: gmange <gmange@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2013/12/02 16:03:39 by gmange #+# #+# */
/* Updated: 2016/09/27 10:53:59 by gmange ### ########.fr */
/* */
/* ************************************************************************** */
#include <unistd.h>
#include "libft.h"
#include "get_next_line.h"
/*
** returns 0 when reading 0 and, leaves line == NULL
** ONLY if there is an empty end of line at the end of file...
*/
/*
** structures with the same fd are set in a row
*/
/*
** 3 reasons to come in:
** 1. chck, remaining struct with corresponding fd from previous read
** 2. chck buf after reading
** 3. read last bits.
*/
/*
** EOF == -1 in C or read == 0
*/
/*
** I check I have no new line already in memory
** would I have I fill line and send it already
** else
** I create a new structure to read in it
** I check it and read again until I find a line or finishes reading
*/
/*
** Unlink `cur` from the singly-linked list rooted at `*root`, free its
** read buffer and the node itself, and return `continu` unchanged so
** callers can write `return (del_cur(...));`.
** NOTE(review): GNL_NXT/GNL_BUF are macros from get_next_line.h and
** presumably expand to members of `cur` — confirm against the header.
*/
static int del_cur(t_read **root, t_read *cur, int continu)
{
	t_read *tmp;
	if (cur == *root)
		*root = GNL_NXT;
	else
	{
		/* Walk to cur's predecessor and splice it out. */
		tmp = *root;
		while (tmp->nxt != cur)
			tmp = tmp->nxt;
		tmp->nxt = GNL_NXT;
	}
	ft_memdel((void**)&GNL_BUF);
	ft_memdel((void**)&cur);
	return (continu);
}
/*
** Fill the pre-allocated `*line` with the pending bytes buffered for `fd`,
** consuming list nodes as they are exhausted.  Copying stops at the first
** CHAR (line delimiter).  Returns 1 when a full line was produced, 0 when
** the buffered data ran out first.
*/
static int line_from_lst(char **line, t_read **root, int const fd)
{
	t_read *cur;
	t_read *tmp;
	size_t i;
	cur = *root;
	while (GNL_FD != fd)
		cur = GNL_NXT;
	i = 0;
	while (cur && GNL_FD == fd)
	{
		/* Copy up to the delimiter or the end of this node's buffer. */
		while (GNL_IDX < GNL_SZE && GNL_BUF[GNL_IDX] != CHAR)
			(*line)[i++] = GNL_BUF[GNL_IDX++];
		/* Delimiter found at the very end of the buffer: node is spent. */
		if (GNL_BUF[GNL_IDX] == CHAR && ++GNL_IDX >= GNL_SZE)
			return (del_cur(root, cur, 1));
		if (GNL_IDX < GNL_SZE)
			return (1);
		/* Buffer exhausted without a delimiter: free node, continue. */
		tmp = GNL_NXT;
		if (GNL_IDX >= GNL_SZE)
			del_cur(root, cur, 1);
		cur = tmp;
	}
	return (0);
}
/*
** Scan `cur`'s buffer for the CHAR delimiter.  When one is found — or when
** continu == 0, meaning EOF was reached and the remainder must be flushed —
** allocate `*line` sized to all pending bytes for this fd and fill it via
** line_from_lst().  Returns 1 (line ready), 0 (need more data), or -1 on
** allocation failure.
*/
static int find_endl(char **line, t_read **root, t_read *cur, int continu)
{
	t_read *tmp;
	size_t len;
	len = GNL_IDX;
	while (len < GNL_SZE && (unsigned char)GNL_BUF[len] != (unsigned char)CHAR)
		len++;
	if (!continu || (unsigned char)GNL_BUF[len] == (unsigned char)CHAR)
	{
		/* Total length = bytes before the delimiter in cur, plus the
		** full sizes of all earlier nodes buffered for the same fd. */
		len -= GNL_IDX;
		tmp = *root;
		while (tmp->fd != GNL_FD)
			tmp = tmp->nxt;
		while (tmp != cur && (len += tmp->sze))
			tmp = tmp->nxt;
		/* EOF with nothing pending: clean up, report end of input. */
		if (!continu && len == 0)
			return (del_cur(root, cur, continu));
		if (!(*line = (char*)ft_memalloc(sizeof(char) * (len + 1))))
			return (-1);
		return (line_from_lst(line, root, GNL_FD));
	}
	return (0);
}
/*
** Allocate a fresh t_read node for `fd`, read up to GNL_BUF_SZE bytes into
** its buffer, and link it into the list: after `*root` when the list is
** non-empty, otherwise as the new root.  Returns NULL on allocation or
** read() failure (all partial allocations are released).
*/
static t_read *new_read(t_read **root, int const fd)
{
	t_read *cur;
	if (!(cur = (t_read*)ft_memalloc(sizeof(*cur))))
		return (NULL);
	GNL_FD = fd;
	if (!(GNL_BUF = (char*)ft_memalloc(sizeof(*GNL_BUF) * GNL_BUF_SZE)))
	{
		ft_memdel((void**)&cur);
		return (NULL);
	}
	/* read() < 0 is an error; its byte count is stored in GNL_SZE. */
	if ((int)(GNL_SZE = read(GNL_FD, GNL_BUF, GNL_BUF_SZE)) < 0)
	{
		ft_memdel((void**)&GNL_BUF);
		ft_memdel((void**)&cur);
		return (NULL);
	}
	GNL_IDX = 0;
	GNL_NXT = NULL;
	if (*root)
		GNL_NXT = (*root)->nxt;
	if (*root)
		(*root)->nxt = cur;
	else
		*root = cur;
	return (cur);
}
/*
** Read the next CHAR-terminated line from `fd` into a freshly allocated
** `*line`.  Buffered leftovers from previous calls (per fd, kept in the
** static list `root`) are consumed first; otherwise new t_read nodes are
** created and filled until a delimiter or EOF is found.
** Returns 1 when a line was read, 0 at end of file, -1 on error.
*/
int get_next_line(int const fd, char **line)
{
	size_t ret;
	static t_read *root = NULL;
	t_read *cur;
	if (!line || (*line = NULL))
		return (-1);
	/* First, try to serve the line from data already buffered for fd. */
	cur = root;
	while (cur && GNL_FD != fd)
		cur = GNL_NXT;
	if (cur && GNL_FD == fd && (ret = find_endl(line, &root, cur, 1)))
		return (ret);
	/* Advance to the last node for this fd before reading more. */
	while (cur && GNL_FD == fd && GNL_NXT)
		cur = GNL_NXT;
	while (1)
	{
		if (root && !(cur = new_read(&cur, fd)))
			return (-1);
		if (!root && !(cur = new_read(&root, fd)))
			return (-1);
		/* Zero bytes read: EOF — flush whatever is pending. */
		if (!GNL_SZE)
			return (find_endl(line, &root, cur, 0));
		if ((ret = find_endl(line, &root, cur, 1)))
			return (ret);
	}
}
| gmange/RT | src/get_next_line.c | C | mit | 4,184 |
#include <math.h>
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#define THREAD_COUNT 4
/* Half-open integer range [start, end) handed to a worker thread. */
typedef struct {
    int start;
    int end;
} range_t;
/* Worker: verify cos^2(i) + sin^2(i) == 1 (within +/-0.0005) for every i
 * in the range, free the heap-allocated argument, and return (void*)1 on
 * success or (void*)0 if any value fell outside the tolerance. */
void *calculate_range(void* range) {
    range_t* r = (range_t*)range;
    long ok = 1;
    for (int x = r->start; x < r->end; x++) {
        double identity = cos(x) * cos(x) + sin(x) * sin(x);
        if (identity > 1.0005 || identity < 0.9995)
            ok = 0;
    }
    free(r);
    return (void*)ok;
}
/*
 * Spawn THREAD_COUNT workers, each checking cos^2+sin^2 == 1 over a
 * disjoint block of 25e6 integers.  Each worker returns (void*)1 on
 * success / (void*)0 on failure; results are folded in as shift amounts,
 * so bit 4 of final_result is set only if all four workers succeeded.
 *
 * Fixes: pthread_create()/pthread_join() return an error NUMBER and do
 * not set errno, so the original perror() printed a meaningless message —
 * report the code directly.  The join-failure message wrongly said
 * "spawn".  malloc() is now checked.
 */
int main() {
    pthread_t threads[THREAD_COUNT];
    int arg_start = 0;
    for (int i = 0; i < THREAD_COUNT; i++) {
        range_t *curr_range = (range_t*)malloc(sizeof(range_t));
        if (curr_range == NULL) {
            fprintf(stderr, "Out of memory\n");
            exit(-1);
        }
        curr_range->start = arg_start;
        curr_range->end = arg_start + 25000000;
        int res = pthread_create(&threads[i], NULL, calculate_range, curr_range);
        if (res != 0) {
            fprintf(stderr, "Could not spawn new thread: error %d\n", res);
            exit(-1);
        }
        arg_start = curr_range->end;
    }
    long final_result = 1;
    for (int i = 0; i < THREAD_COUNT; i++) {
        void *thread_result;
        int res = pthread_join(threads[i], &thread_result);
        if (res != 0) {
            fprintf(stderr, "Could not join thread: error %d\n", res);
            exit(-1);
        }
        /* Shift by 1 per successful worker: 4 successes -> bit 4 set. */
        final_result <<= (long)thread_result;
    }
    if (final_result & (1 << 4)) {
        printf("OK!\n");
    } else {
        printf("Not OK!\n");
    }
    return 0;
}
| arnaudoff/elsys | 2015-2016/operating-systems/threads_homework/main.c | C | mit | 1,476 |
#include "cf_internal.h"
#define CACHE_SIZE 1024
#define INDEX(i) ((i) % CACHE_SIZE)
/* Allocate a fresh frame ring-cache for a session.  wait_timer is seeded
 * from the current RTT estimate and playback starts in `buffer_waiting`
 * until enough frames arrive.  Returns NULL if either allocation fails
 * (the original dereferenced unchecked calloc() results). */
static frame_cache_t* open_real_video_cache(cf_session_t* s)
{
    frame_cache_t* cache = calloc(1, sizeof(frame_cache_t));
    if (cache == NULL)
        return NULL;
    cache->wait_timer = s->proc->rtt + 2 * s->proc->rtt_val;
    cache->state = buffer_waiting;
    cache->frame_timer = 100;
    cache->size = CACHE_SIZE;
    cache->frames = calloc(cache->size, sizeof(cf_frame_t));
    if (cache->frames == NULL){
        free(cache);
        return NULL;
    }
    return cache;
}
/* Release every received segment of `frame`, reset its metadata, and
 * advance the cache's min_fid to this frame's id.  Frames with
 * seg_number == 0 are already clean and are skipped. */
static inline void real_video_clean_frame(cf_session_t* session, frame_cache_t* c, cf_frame_t* frame)
{
    int i;
    if (frame->seg_number == 0)
        return;
    for (i = 0; i < frame->seg_number; ++i){
        if (frame->segments[i] != NULL){
            /* Segments are slab-allocated from the session pool. */
            slab_free(session->mem, frame->segments[i]);
            frame->segments[i] = NULL;
        }
    }
    free(frame->segments);
    log_debug("buffer clean frame, frame id = %u, ts = %llu\n", frame->fid, frame->ts);
    frame->ts = 0;
    frame->frame_type = 0;
    frame->seg_number = 0;
    c->min_fid = frame->fid;
}
/* Tear down a cache created by open_real_video_cache(): release every
 * frame's segments, then the frame array, then the cache itself. */
static void close_real_video_cache(cf_session_t* s, frame_cache_t* cache)
{
    uint32_t idx;
    for (idx = 0; idx < cache->size; ++idx) {
        real_video_clean_frame(s, cache, &cache->frames[idx]);
    }
    free(cache->frames);
    free(cache);
}
/* Drop every buffered frame and return the cache to its initial waiting
 * state; wait_timer restarts from the current RTT estimate (min 100ms). */
static void reset_real_video_cache(cf_session_t* s, frame_cache_t* cache)
{
    uint32_t i;
    for (i = 0; i < cache->size; ++i)
        real_video_clean_frame(s, cache, &cache->frames[i]);
    cache->min_fid = 0;
    cache->max_fid = 0;
    cache->play_ts = 0;
    cache->frame_ts = 0;
    cache->max_ts = 100;
    cache->frame_timer = 100;
    cache->state = buffer_waiting;
    cache->wait_timer = SU_MAX(100, s->proc->rtt + 2 * s->proc->rtt_val);
    cache->loss_flag = 0;
}
/* Make room for incoming frame `fid`: clean every slot between the current
 * max_fid and fid.  If fid would wrap past the ring capacity, additionally
 * evict from min_fid forward up to (but not including) the next key frame
 * (frame_type == 1), so playback can resume from a decodable point. */
static void real_video_evict_frame(cf_session_t* s, frame_cache_t* c, uint32_t fid)
{
    uint32_t pos, i;
    for (pos = c->max_fid + 1; pos <= fid; pos++)
        real_video_clean_frame(s, c, &c->frames[INDEX(pos)]);
    if (fid < c->min_fid + c->size)
        return;
    /* Ring overflow: find the next key frame after min_fid... */
    for (pos = c->min_fid + 1; pos < c->max_fid; ++pos){
        if (c->frames[INDEX(pos)].frame_type == 1)
            break;
    }
    /* ...and evict everything before it. */
    for (i = c->min_fid + 1; i < pos; ++i)
        real_video_clean_frame(s, c, &c->frames[INDEX(i)]);
}
/* Insert one video segment into the ring.
 * Returns 0 when the segment was stored, -1 when it was rejected
 * (malformed index, frame already evicted, or duplicate segment).
 * When a newer frame arrives this also updates max_fid/max_ts and
 * re-estimates the inter-frame interval, clamped to 20..200 ms. */
static int real_video_cache_put(cf_session_t* s, frame_cache_t* c, cf_seg_video_t* seg)
{
    cf_frame_t* frame;
    int ret = -1;
    if (seg->index >= seg->total){
        assert(0);
        return ret;
    }
    else if (seg->fid <= c->min_fid)
        return ret;
    if (seg->fid > c->max_fid){
        if (c->max_fid > 0)
            real_video_evict_frame(s, c, seg->fid);
        else if (c->min_fid == 0)
            c->min_fid = seg->fid - 1;
        /* Estimate ms-per-frame from the id/timestamp deltas. */
        if (c->max_fid >= 0 && c->max_fid < seg->fid && c->max_ts < seg->ts){
            c->frame_timer = (seg->ts - c->max_ts) / (seg->fid - c->max_fid);
            if (c->frame_timer < 20)
                c->frame_timer = 20;
            else if (c->frame_timer > 200)
                c->frame_timer = 200;
        }
        c->max_ts = seg->ts;
        c->max_fid = seg->fid;
    }
    log_debug("buffer put video frame, frame = %u, seq = %u, ts = %u\n", seg->fid, seg->seq, seg->ts);
    frame = &(c->frames[INDEX(seg->fid)]);
    frame->fid = seg->fid;
    frame->frame_type = seg->ftype;
    frame->ts = seg->ts;
    if (frame->seg_number == 0){
        /* First segment of this frame: allocate the segment table. */
        frame->seg_number = seg->total;
        frame->segments = calloc(frame->seg_number, sizeof(seg));
        frame->segments[seg->index] = seg;
        ret = 0;
    }
    else{
        /* Duplicate segments are dropped (ret stays -1). */
        if (frame->segments[seg->index] == NULL){
            frame->segments[seg->index] = seg;
            ret = 0;
        }
    }
    return ret;
}
/* Switch from waiting to playing once the buffered timestamp span exceeds
 * 5/4 of wait_timer; the play position starts that far behind the newest
 * frame so jitter can be absorbed. */
static void real_video_cache_check_playing(cf_session_t* s, frame_cache_t* c)
{
    uint64_t max_ts, min_ts;
    if (c->max_fid > c->min_fid){
        max_ts = c->frames[INDEX(c->max_fid)].ts;
        min_ts = c->frames[INDEX(c->min_fid + 1)].ts;
        if (min_ts > 0 && max_ts > min_ts + (c->wait_timer * 5 / 4) && c->max_fid >= c->min_fid + 1){
            c->state = buffer_playing;
            c->play_ts = GET_SYS_MS();
            c->frame_ts = max_ts - (c->wait_timer * 5 / 4);
        }
    }
}
/* Fall back to the waiting state when the buffer has drained. */
static inline void real_video_cache_check_waiting(cf_session_t* s, frame_cache_t* c)
{
    if (c->max_fid <= c->min_fid){
        c->state = buffer_waiting;
        log_debug("buffer waiting ...........\n");
    }
}
/* Return 0 when every segment of `frame` has arrived, -1 otherwise. */
static inline int real_video_cache_check_frame_full(cf_session_t* s, cf_frame_t* frame)
{
    int idx = 0;
    while (idx < frame->seg_number) {
        if (frame->segments[idx] == NULL)
            return -1;
        ++idx;
    }
    return 0;
}
/* Advance the playback clock: frame_ts moves forward by however much
 * wall-clock time has elapsed since the last sync. */
static inline void real_video_cache_sync_timestamp(cf_session_t* s, frame_cache_t* c)
{
    uint64_t cur_ts = GET_SYS_MS();
    if (cur_ts > c->play_ts){
        c->frame_ts = (uint32_t)(cur_ts - c->play_ts) + c->frame_ts;
        c->play_ts = cur_ts;
    }
}
/* Pop the next frame's payload into `data` when it is both due
 * (frame->ts <= playback clock) and complete; otherwise return -1 with
 * *sizep = 0.  `data` must be large enough for a whole reassembled frame
 * — the required capacity is a caller contract not visible here.
 * On success the playback clock is nudged so it never lags more than
 * wait_timer*5/4 behind the newest buffered frame. */
static int real_video_cache_get(cf_session_t* s, frame_cache_t* c, uint8_t* data, size_t* sizep)
{
    uint32_t pos;
    size_t size;
    int ret, i;
    cf_frame_t* frame;
    uint64_t max_ts;
    if (c->state == buffer_waiting)
        real_video_cache_check_playing(s, c);
    else
        real_video_cache_check_waiting(s, c);
    if (c->state != buffer_playing){
        size = 0;
        ret = -1;
        goto err;
    }
    real_video_cache_sync_timestamp(s, c);
    max_ts = c->frames[INDEX(c->max_fid)].ts;
    /* Oldest undelivered frame sits just past min_fid. */
    pos = INDEX(c->min_fid + 1);
    frame = &c->frames[pos];
    if (frame->ts <= c->frame_ts && real_video_cache_check_frame_full(s, frame) == 0){
        /* Concatenate the segments into the caller's buffer. */
        size = 0;
        for (i = 0; i < frame->seg_number; ++i){
            memcpy(data + size, frame->segments[i]->data, frame->segments[i]->data_size);
            size += frame->segments[i]->data_size;
        }
        if (frame->ts + c->wait_timer * 5 / 4 >= max_ts || c->min_fid + 1 == c->max_fid)
            c->frame_ts = frame->ts;
        else
            c->frame_ts = max_ts - c->wait_timer * 5 / 4;
        real_video_clean_frame(s, c, frame);
        ret = 0;
    }
    else{
        size = 0;
        ret = -1;
    }
err:
    *sizep = size;
    return ret;
}
/* Derive the sequence number immediately preceding the oldest buffered
 * frame's first segment (seq - index - 1); 0 when no segment is present.
 * Used to advance the receiver's cumulative-ack base. */
static uint32_t real_video_cache_get_min_seq(cf_session_t* s, frame_cache_t* c)
{
    int i;
    cf_frame_t* frame;
    cf_seg_video_t* seg;
    frame = &c->frames[INDEX(c->min_fid)];
    for (i = 0; i < frame->seg_number; ++i){
        seg = frame->segments[i];
        if (seg != NULL)
            return seg->seq - seg->index - 1;
    }
    return 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/* Per-lost-sequence bookkeeping: when we last NACKed it and how often. */
typedef struct
{
    uint64_t ts;     /* timestamp of the most recent NACK, ms */
    int count;       /* number of NACKs sent for this sequence */
}wb_loss_t;
/* skiplist value destructor: loss entries are heap-allocated wb_loss_t. */
static inline void loss_free(skiplist_item_t key, skiplist_item_t val)
{
    free(val.ptr);
}
/* Allocate the receive-side real-time video buffer: a loss skiplist plus
 * the frame ring-cache.  Caller owns the result; release with
 * destroy_video_real_buffer().
 * NOTE(review): calloc()/open_real_video_cache() results are not checked
 * — confirm whether allocation failure is handled elsewhere. */
cf_video_real_buffer_t* create_video_real_buffer(cf_session_t* s)
{
    cf_video_real_buffer_t* r = calloc(1, sizeof(cf_video_real_buffer_t));
    r->loss = skiplist_create(id_compare, loss_free);
    r->cache = open_real_video_cache(s);
    r->cache_ts = GET_SYS_MS();
    return r;
}
/* Tear down a buffer created by create_video_real_buffer(); NULL is a
 * no-op. */
void destroy_video_real_buffer(cf_session_t* s, cf_video_real_buffer_t* r)
{
    if (r == NULL)
        return;
    assert(r->cache && r->loss);
    skiplist_destroy(r->loss);
    close_real_video_cache(s, r->cache);
    free(r);
}
/* Return the buffer to its pristine, deactivated state: drop all cached
 * frames and loss records and zero the sequence tracking. */
void reset_video_real_buffer(cf_session_t* s, cf_video_real_buffer_t* r)
{
    reset_real_video_cache(s, r->cache);
    skiplist_clear(r->loss);
    r->base_uid = 0;
    r->base_seq = 0;
    r->actived = 0;
    r->max_seq = 0;
    r->ack_ts = GET_SYS_MS();
    r->active_ts = r->ack_ts;
    r->loss_count = 0;
}
/* Arm the receive buffer for an incoming stream.
 * start_seq is the writer's starting sequence (0 = not yet known), rate
 * seeds the per-frame timer (frames/sec), base_uid identifies the sender.
 * Returns -1 when the buffer is already active, 0 otherwise.
 * Cleanup: the original assigned r->base_seq three times (twice inside
 * the start_seq > 0 branch, once unconditionally).  The net effect,
 * preserved here, is base_seq = start_seq always, and max_seq = start_seq
 * only when start_seq > 0. */
int active_video_real_buffer(cf_session_t* s, cf_video_real_buffer_t* r, uint32_t start_seq, uint16_t rate, uint32_t base_uid)
{
    if (r->actived == 1)
        return -1;
    r->actived = 1;
    r->base_seq = start_seq;
    if (start_seq > 0)
        r->max_seq = start_seq;
    r->base_uid = base_uid;
    if (rate > 0)
        r->cache->frame_timer = SU_MAX(20, 1000 / rate);
    r->active_ts = GET_SYS_MS();
    return 0;
}
/* Record sequence `seq` as received: remove it from the loss list, then
 * insert a loss record for every sequence in the gap (max_seq+1 .. seq-1).
 * New records are back-dated by one RTT so the first NACK fires on the
 * next ack pass. */
static void video_real_buffer_update_loss(cf_session_t* s, cf_video_real_buffer_t* r, uint32_t seq)
{
    uint32_t i;
    skiplist_item_t key, val;
    skiplist_iter_t* iter;
    key.u32 = seq;
    skiplist_remove(r->loss, key);
    for (i = r->max_seq + 1; i < seq; ++i){
        key.u32 = i;
        iter = skiplist_search(r->loss, key);
        if (iter == NULL){
            wb_loss_t* l = calloc(1, sizeof(wb_loss_t));
            l->ts = GET_SYS_MS() - s->proc->rtt;
            l->count = 0;
            val.ptr = l;
            skiplist_insert(r->loss, key, val);
        }
    }
}
/* Encode a SEG_ACK message into the session's send stream and ship it to
 * the server node. */
static inline void video_send_segment(cf_session_t* s, cf_segment_ack_t* ack)
{
    cf_header_t header;
    CF_INIT_HEADER(header, SEG_ACK, s->rid, s->uid);
    cf_encode_msg(&s->sstrm, &header, ack);
    processor_send(s, s->proc, &s->sstrm, &s->proc->server_node);
}
/* Build and send a cumulative + selective ack, rate-limited to one every
 * 10ms.  `hb` != 0 marks a heartbeat pass: the ack is then only sent if
 * there are NACKs to carry.  Also adapts the jitter-buffer wait_timer
 * from observed retransmission pressure. */
static void video_real_ack(cf_session_t* s, cf_video_real_buffer_t* r, int hb)
{
    uint64_t cur_ts;
    cf_segment_ack_t ack;
    skiplist_iter_t* iter;
    skiplist_item_t key;
    wb_loss_t* l;
    uint32_t min_seq, delay;
    int max_count = 0;
    cur_ts = GET_SYS_MS();
    if (r->ack_ts + 10 < cur_ts){
        /* Advance the cumulative base to the oldest buffered frame and
         * drop loss records the base has overtaken. */
        min_seq = real_video_cache_get_min_seq(s, r->cache);
        if (min_seq > r->base_seq){
            for (key.u32 = r->base_seq + 1; key.u32 <= min_seq; ++key.u32)
                skiplist_remove(r->loss, key);
            r->base_seq = min_seq;
        }
        ack.base_uid = r->base_uid;
        ack.base = r->base_seq;
        ack.loss_num = 0;
        /* NACK each outstanding loss at most once per RTT window, up to
         * LOSS_SISE entries per ack. */
        SKIPLIST_FOREACH(r->loss, iter){
            l = (wb_loss_t*)iter->val.ptr;
            if (iter->key.u32 <= r->base_seq)
                continue;
            if (l->ts + s->proc->rtt + s->proc->rtt_val <= cur_ts && ack.loss_num < LOSS_SISE){
                ack.loss[ack.loss_num++] = iter->key.u32;
                l->ts = cur_ts;
                r->loss_count++;
                l->count++;
            }
            if (l->count > max_count)
                max_count = l->count;
        }
        if (ack.loss_num > 0 || hb == 0)
            video_send_segment(s, &ack);
        r->ack_ts = cur_ts;
    }
    /* Grow wait_timer when segments need repeated retransmission
     * (capped at 5s); otherwise keep it at least 1 RTT + 2 RTTvar while
     * losses are outstanding. */
    /*if (r->active_ts + 10000 < cur_ts)*/{
        if (max_count > 1){
            delay = (max_count * 16 + 7) * (s->proc->rtt + s->proc->rtt_val) / 16;
            if (delay > r->cache->wait_timer)
                r->cache->wait_timer = SU_MIN(delay, 5000);
        }
        else if (skiplist_size(r->loss) > 0)
            r->cache->wait_timer = SU_MAX((s->proc->rtt + s->proc->rtt_val * 2), r->cache->wait_timer);
    }
    r->cache->wait_timer = SU_MAX(r->cache->frame_timer, r->cache->wait_timer);
}
/* Receive one video segment from the network.  Returns 0 when accepted,
 * -1 when rejected (buffer inactive, stale/duplicate sequence, sequence
 * too far ahead, or no key frame seen yet).  Accepted segments update the
 * loss list and may trigger an immediate ack. */
int video_real_video_put(cf_session_t* s, cf_video_real_buffer_t* r, cf_seg_video_t* seg)
{
    uint32_t seq;
    cf_seg_video_t* tmp;
    /* Nothing decodable can start before the first key frame. */
    if (r->max_seq == 0 && seg->ftype == 0)
        return -1;
    seq = seg->seq;
    if (r->actived == 0 || seg->seq <= r->base_seq || (r->max_seq > 0 && seq > r->max_seq + 2000)){
        return -1;
    }
    /* First segment ever: derive the stream's starting sequence. */
    if (r->max_seq == 0 && seg->seq > seg->index){
        r->max_seq = seg->seq - seg->index - 1;
        r->base_seq = seg->seq - seg->index - 1;
    }
    video_real_buffer_update_loss(s, r, seq);
    /* The cache takes ownership of a heap copy of the segment. */
    tmp = calloc(1, sizeof(cf_seg_video_t));
    *tmp = *seg;
    if (real_video_cache_put(s, r->cache, tmp) != 0){
        free(tmp);
        return -1;
    }
    if (seq == r->base_seq + 1)
        r->base_seq = seq;
    r->max_seq = SU_MAX(r->max_seq, seq);
    video_real_ack(s, r, 0);
    return 0;
}
/* Pop the next playable frame into `data`/`*sizep`; fails (-1) until the
 * buffer has been activated. */
int video_real_video_get(cf_session_t* s, cf_video_real_buffer_t* r, uint8_t* data, size_t* sizep)
{
    if (r->actived != 0)
        return real_video_cache_get(s, r->cache, data, sizep);
    return -1;
}
/* Periodic maintenance: send a heartbeat ack, and once per max(RTT, 1s)
 * decay wait_timer back toward frame_timer — faster when no losses were
 * NACKed in the window, slower while it still exceeds 2 RTTs. */
void video_real_video_timer(cf_session_t* s, cf_video_real_buffer_t* r)
{
    video_real_ack(s, r, 1);
    if (r->cache_ts + SU_MAX(s->proc->rtt + s->proc->rtt_val, 1000) < GET_SYS_MS()){
        if (r->loss_count == 0)
            r->cache->wait_timer = SU_MAX(r->cache->wait_timer * 7 / 8, r->cache->frame_timer);
        else if (r->cache->wait_timer > 2 * (s->proc->rtt + s->proc->rtt_val))
            r->cache->wait_timer = SU_MAX(r->cache->wait_timer * 15 / 16, r->cache->frame_timer);
        r->cache_ts = GET_SYS_MS();
        r->loss_count = 0;
    }
}
| yuanrongxi/sharing | sharing/buffer/cf_real_video.c | C | mit | 10,971 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <avro/platform.h>
#include <stdlib.h>
#include <string.h>
#include "avro_private.h"
#include "avro/allocation.h"
#include "avro/basics.h"
#include "avro/data.h"
#include "avro/errors.h"
#include "avro/refcount.h"
#include "avro/resolver.h"
#include "avro/schema.h"
#include "avro/value.h"
#include "st.h"
#ifndef AVRO_RESOLVER_DEBUG
#define AVRO_RESOLVER_DEBUG 0
#endif
#if AVRO_RESOLVER_DEBUG
#include <stdio.h>
#define AVRO_DEBUG(...) \
do { \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, "\n"); \
} while (0)
#else
#define AVRO_DEBUG(...) /* don't print messages */
#endif
typedef struct avro_resolved_reader  avro_resolved_reader_t;

/* Base "class" for every resolved-reader implementation: an
 * avro_value_iface_t extended with the writer/reader schema pair and a
 * set of optional lifecycle hooks that concrete resolvers fill in. */
struct avro_resolved_reader {
	avro_value_iface_t  parent;

	/** The reference count for this interface. */
	volatile int  refcount;

	/** The writer schema. */
	avro_schema_t  wschema;

	/** The reader schema. */
	avro_schema_t  rschema;

	/* The size of the value instances for this resolver. */
	size_t  instance_size;

	/* A function to calculate the instance size once the overall
	 * top-level resolver (and all of its children) have been
	 * constructed. */
	void
	(*calculate_size)(avro_resolved_reader_t *iface);

	/* A free function for this resolver */
	void
	(*free_iface)(avro_resolved_reader_t *iface, st_table *freeing);

	/* An initialization function for instances of this resolver. */
	int
	(*init)(const avro_resolved_reader_t *iface, void *self);

	/* A finalization function for instances of this resolver. */
	void
	(*done)(const avro_resolved_reader_t *iface, void *self);

	/* Clear out any existing wrappers, if any */
	int
	(*reset_wrappers)(const avro_resolved_reader_t *iface, void *self);
};
/* Convenience wrappers that invoke the optional lifecycle hooks above,
 * tolerating a NULL hook (no-op / success). */
#define avro_resolved_reader_calculate_size(iface) \
	do { \
		if ((iface)->calculate_size != NULL) { \
			(iface)->calculate_size((iface)); \
		} \
	} while (0)
#define avro_resolved_reader_init(iface, self) \
	((iface)->init == NULL? 0: (iface)->init((iface), (self)))
#define avro_resolved_reader_done(iface, self) \
	((iface)->done == NULL? (void) 0: (iface)->done((iface), (self)))
#define avro_resolved_reader_reset_wrappers(iface, self) \
	((iface)->reset_wrappers == NULL? 0: \
	 (iface)->reset_wrappers((iface), (self)))
/*
 * We assume that each instance type in this value contains an an
 * avro_value_t as its first element, which is the current wrapped
 * value.
 */

/* Bind `resolved` to a new source value, releasing any previously wrapped
 * one; takes a fresh reference on `dest`. */
void
avro_resolved_reader_set_source(avro_value_t *resolved,
				avro_value_t *dest)
{
	avro_value_t  *self = (avro_value_t *) resolved->self;
	if (self->self != NULL) {
		avro_value_decref(self);
	}
	avro_value_copy_ref(self, dest);
}
/* Drop the reference to the wrapped source value (if any) and leave the
 * resolved value unbound. */
void
avro_resolved_reader_clear_source(avro_value_t *resolved)
{
	avro_value_t  *wrapped = (avro_value_t *) resolved->self;
	if (wrapped->self != NULL) {
		avro_value_decref(wrapped);
	}
	wrapped->iface = NULL;
	wrapped->self = NULL;
}
/* Allocate a new resolved-value instance.  The allocation is prefixed by
 * a hidden `volatile int` reference count (instances are refcounted
 * separately from the interface); `value->self` points just past it.
 * Returns 0 on success, ENOMEM or the init hook's error otherwise. */
int
avro_resolved_reader_new_value(avro_value_iface_t *viface,
			       avro_value_t *value)
{
	int  rval;
	avro_resolved_reader_t  *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	void  *self = avro_malloc(iface->instance_size + sizeof(volatile int));
	if (self == NULL) {
		value->iface = NULL;
		value->self = NULL;
		return ENOMEM;
	}
	memset(self, 0, iface->instance_size + sizeof(volatile int));
	volatile int  *refcount = (volatile int *) self;
	self = (char *) self + sizeof(volatile int);
	rval = avro_resolved_reader_init(iface, self);
	if (rval != 0) {
		avro_free(self, iface->instance_size + sizeof(volatile int));
		value->iface = NULL;
		value->self = NULL;
		return rval;
	}
	*refcount = 1;
	value->iface = avro_value_iface_incref(viface);
	value->self = self;
	return 0;
}
/* Finalize and free an instance created by new_value(): run the done
 * hook, release the wrapped source value, then free the allocation
 * including the hidden refcount prefix. */
static void
avro_resolved_reader_free_value(const avro_value_iface_t *viface, void *vself)
{
	avro_resolved_reader_t  *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	avro_value_t  *self = (avro_value_t *) vself;

	avro_resolved_reader_done(iface, vself);
	if (self->self != NULL) {
		avro_value_decref(self);
	}

	/* Step back to the true start of the allocation (refcount prefix). */
	vself = (char *) vself - sizeof(volatile int);
	avro_free(vself, iface->instance_size + sizeof(volatile int));
}
/* Bump the instance refcount stored just before value->self.
 * Only valid for the top-level value of a resolver tree. */
static void
avro_resolved_reader_incref(avro_value_t *value)
{
	/*
	 * This only works if you pass in the top-level value.
	 */

	volatile int  *refcount = (volatile int *) ((char *) value->self - sizeof(volatile int));
	avro_refcount_inc(refcount);
}
/* Drop the instance refcount; frees the instance when it hits zero.
 * Only valid for the top-level value of a resolver tree. */
static void
avro_resolved_reader_decref(avro_value_t *value)
{
	/*
	 * This only works if you pass in the top-level value.
	 */

	volatile int  *refcount = (volatile int *) ((char *) value->self - sizeof(volatile int));
	if (avro_refcount_dec(refcount)) {
		avro_resolved_reader_free_value(value->iface, value->self);
	}
}
/* Take a reference on the resolver interface and hand it back. */
static avro_value_iface_t *
avro_resolved_reader_incref_iface(avro_value_iface_t *viface)
{
	avro_resolved_reader_t  *resolver =
	    container_of(viface, avro_resolved_reader_t, parent);
	avro_refcount_inc(&resolver->refcount);
	return viface;
}
/* Free a resolver, using the `freeing` set to break cycles: resolver
 * graphs can be recursive (via link resolvers), so each resolver is
 * entered into the set before its free_iface hook runs and is skipped if
 * seen again. */
static void
free_resolver(avro_resolved_reader_t *iface, st_table *freeing)
{
	/* First check if we've already started freeing this resolver. */
	if (st_lookup(freeing, (st_data_t) iface, NULL)) {
		AVRO_DEBUG("Already freed %p", iface);
		return;
	}

	/* Otherwise add this resolver to the freeing set, then free it. */
	st_insert(freeing, (st_data_t) iface, (st_data_t) NULL);
	AVRO_DEBUG("Freeing resolver %p (%s->%s)", iface,
		   avro_schema_type_name(iface->wschema),
		   avro_schema_type_name(iface->rschema));
	iface->free_iface(iface, freeing);
}
/* Default calculate_size hook: a plain resolved value is just the wrapped
 * avro_value_t.  Clears the hook pointer so the size is computed once. */
static void
avro_resolved_reader_calculate_size_(avro_resolved_reader_t *iface)
{
	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));
	iface->instance_size = sizeof(avro_value_t);
}
/* Default free_iface hook: release the schema references and the
 * resolver struct itself (no children to visit). */
static void
avro_resolved_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	AVRO_UNUSED(freeing);
	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_reader_t, iface);
}
/* Drop the interface refcount; when it reaches zero, free the whole
 * resolver graph using a fresh cycle-detection set. */
static void
avro_resolved_reader_decref_iface(avro_value_iface_t *viface)
{
	avro_resolved_reader_t  *iface =
	    container_of(viface, avro_resolved_reader_t, parent);

	AVRO_DEBUG("Decref resolver %p (before=%d)", iface, iface->refcount);
	if (avro_refcount_dec(&iface->refcount)) {
		st_table  *freeing = st_init_numtable();
		free_resolver(iface, freeing);
		st_free_table(freeing);
	}
}
/* Reset a resolved value: clear any wrapper state first, then reset the
 * wrapped value itself.  Returns the first error encountered. */
static int
avro_resolved_reader_reset(const avro_value_iface_t *viface, void *vself)
{
	/*
	 * To reset a wrapped value, we first clear out any wrappers,
	 * and then have the wrapped value reset itself.
	 */

	int  rval;
	avro_resolved_reader_t  *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	avro_value_t  *self = (avro_value_t *) vself;
	check(rval, avro_resolved_reader_reset_wrappers(iface, vself));
	return avro_value_reset(self);
}
/* Report the type of the READER schema (the type callers consume). */
static avro_type_t
avro_resolved_reader_get_type(const avro_value_iface_t *viface, const void *vself)
{
	AVRO_UNUSED(vself);
	const avro_resolved_reader_t  *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	return avro_typeof(iface->rschema);
}
/* Report the READER schema (borrowed reference, not increfed). */
static avro_schema_t
avro_resolved_reader_get_schema(const avro_value_iface_t *viface, const void *vself)
{
	AVRO_UNUSED(vself);
	avro_resolved_reader_t  *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	return iface->rschema;
}
/* Allocate a base resolver wired with the default vtable entries; takes
 * its own references on both schemas.
 * NOTE(review): the avro_new() result is used unchecked — confirm
 * whether avro_new can return NULL in this codebase. */
static avro_resolved_reader_t *
avro_resolved_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t  *self = (avro_resolved_reader_t *) avro_new(avro_resolved_reader_t);
	memset(self, 0, sizeof(avro_resolved_reader_t));

	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_reader_get_type;
	self->parent.get_schema = avro_resolved_reader_get_schema;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->calculate_size = avro_resolved_reader_calculate_size_;
	self->free_iface = avro_resolved_reader_free_iface;
	self->reset_wrappers = NULL;
	return self;
}
/*-----------------------------------------------------------------------
 * Memoized resolvers
 */

typedef struct avro_resolved_link_reader  avro_resolved_link_reader_t;

/* State threaded through resolver construction: `mem` memoizes
 * (wschema, rschema) pairs already resolved, and `links` chains the link
 * resolvers that must be fixed up after the tree is complete. */
typedef struct memoize_state_t {
	avro_memoize_t  mem;
	avro_resolved_link_reader_t  *links;
} memoize_state_t;

static avro_resolved_reader_t *
avro_resolved_reader_new_memoized(memoize_state_t *state,
				  avro_schema_t wschema, avro_schema_t rschema);
/*-----------------------------------------------------------------------
* Recursive schemas
*/
/*
* Recursive schemas are handled specially; the value implementation for
* an AVRO_LINK schema is simply a wrapper around the value
* implementation for the link's target schema. The value methods all
* delegate to the wrapped implementation.
*
* Complicating the links here is that we might be linking to the writer
* schema or the reader schema. This only matters for a couple of
* methods, so instead of keeping a boolean flag in the value interface,
* we just have separate method implementations that we slot in
* appropriately.
*/
struct avro_resolved_link_reader {
	avro_resolved_reader_t  parent;

	/**
	 * A pointer to the “next” link resolver that we've had to
	 * create.  We use this as we're creating the overall top-level
	 * resolver to keep track of which ones we have to fix up
	 * afterwards.
	 */
	avro_resolved_link_reader_t  *next;

	/** The target's implementation. */
	avro_resolved_reader_t  *target_resolver;
};

/*
 * Instance data for a link value: the writer value being wrapped, plus
 * a value bound to the link target's resolver implementation.
 */
typedef struct avro_resolved_link_value {
	avro_value_t  wrapped;
	avro_value_t  target;
} avro_resolved_link_value_t;
/*
 * Size calculation when the *writer* schema is the link.  Both variants
 * record the same instance size (an avro_resolved_link_value_t); the
 * separate functions exist so the debug output shows which side of the
 * resolution is the link.
 */
static void
avro_resolved_wlink_reader_calculate_size(avro_resolved_reader_t *iface)
{
	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for [%s]->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));
	iface->instance_size = sizeof(avro_resolved_link_value_t);
}

/* Same as above, but when the *reader* schema is the link. */
static void
avro_resolved_rlink_reader_calculate_size(avro_resolved_reader_t *iface)
{
	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->[%s]",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));
	iface->instance_size = sizeof(avro_resolved_link_value_t);
}
/*
 * Frees a link resolver and drops its schema references.  The target
 * resolver (set only after a successful try_wlink/try_rlink) is
 * released through free_resolver; the `freeing` table presumably
 * guards against freeing a resolver twice when schemas are mutually
 * recursive — confirm against free_resolver's implementation.
 */
static void
avro_resolved_link_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	avro_resolved_link_reader_t *liface =
	    container_of(iface, avro_resolved_link_reader_t, parent);
	if (liface->target_resolver != NULL) {
		free_resolver(liface->target_resolver, freeing);
	}
	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_link_reader_t, iface);
}
/*
 * Initializes a link value: allocates a separate instance for the link
 * target's implementation and initializes it.  Returns 0 on success or
 * ENOMEM/an error code from the target's init; on failure the target
 * instance is freed again.
 */
static int
avro_resolved_link_reader_init(const avro_resolved_reader_t *iface, void *vself)
{
	int rval;
	const avro_resolved_link_reader_t *liface =
	    container_of(iface, avro_resolved_link_reader_t, parent);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	size_t target_instance_size = liface->target_resolver->instance_size;

	self->target.iface = &liface->target_resolver->parent;
	self->target.self = avro_malloc(target_instance_size);
	if (self->target.self == NULL) {
		return ENOMEM;
	}
	AVRO_DEBUG("Allocated <%p:%" PRIsz "> for link", self->target.self, target_instance_size);

	/* NOTE(review): self->wrapped is copied into the target instance
	 * here, before any code visible in this file assigns it —
	 * presumably the caller fills in `wrapped` and the getters below
	 * refresh this copy on every access; confirm with the caller of
	 * avro_resolved_reader_init. */
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;

	rval = avro_resolved_reader_init(liface->target_resolver, self->target.self);
	if (rval != 0) {
		avro_free(self->target.self, target_instance_size);
	}
	return rval;
}
/*
 * Tears down a link value: finalizes the target instance through its
 * resolver and releases the memory allocated in ..._init.
 */
static void
avro_resolved_link_reader_done(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_link_reader_t *liface =
	    container_of(iface, avro_resolved_link_reader_t, parent);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	size_t target_instance_size = liface->target_resolver->instance_size;
	AVRO_DEBUG("Freeing <%p:%" PRIsz "> for link", self->target.self, target_instance_size);
	avro_resolved_reader_done(liface->target_resolver, self->target.self);
	avro_free(self->target.self, target_instance_size);
	/* Clear the pointers so a stale target can't be used afterwards. */
	self->target.iface = NULL;
	self->target.self = NULL;
}
/* Resets the wrapped target instance via the target's resolver. */
static int
avro_resolved_link_reader_reset(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_link_reader_t *liface =
	    container_of(iface, avro_resolved_link_reader_t, parent);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	return avro_resolved_reader_reset_wrappers
	    (liface->target_resolver, self->target.self);
}

/*
 * get_type/get_schema refresh the target instance's copy of the
 * wrapped writer value, then delegate to the target implementation.
 * NOTE(review): this writes through target.self even though vself is
 * const — the mutation is confined to the heap-allocated target
 * instance created in ..._init.
 */
static avro_type_t
avro_resolved_link_reader_get_type(const avro_value_iface_t *iface, const void *vself)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_type(&self->target);
}

static avro_schema_t
avro_resolved_link_reader_get_schema(const avro_value_iface_t *iface, const void *vself)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_schema(&self->target);
}
/*
 * Getter delegation for link values.  Every method follows the same
 * pattern: copy the current wrapped writer value into the target
 * instance, then forward the call to the link target's implementation.
 */
static int
avro_resolved_link_reader_get_boolean(const avro_value_iface_t *iface,
				      const void *vself, int *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_boolean(&self->target, out);
}

static int
avro_resolved_link_reader_get_bytes(const avro_value_iface_t *iface,
				    const void *vself, const void **buf, size_t *size)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_bytes(&self->target, buf, size);
}

static int
avro_resolved_link_reader_grab_bytes(const avro_value_iface_t *iface,
				     const void *vself, avro_wrapped_buffer_t *dest)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_grab_bytes(&self->target, dest);
}

static int
avro_resolved_link_reader_get_double(const avro_value_iface_t *iface,
				     const void *vself, double *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_double(&self->target, out);
}

static int
avro_resolved_link_reader_get_float(const avro_value_iface_t *iface,
				    const void *vself, float *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_float(&self->target, out);
}

static int
avro_resolved_link_reader_get_int(const avro_value_iface_t *iface,
				  const void *vself, int32_t *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_int(&self->target, out);
}

static int
avro_resolved_link_reader_get_long(const avro_value_iface_t *iface,
				   const void *vself, int64_t *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_long(&self->target, out);
}

static int
avro_resolved_link_reader_get_null(const avro_value_iface_t *iface, const void *vself)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_null(&self->target);
}

static int
avro_resolved_link_reader_get_string(const avro_value_iface_t *iface,
				     const void *vself, const char **str, size_t *size)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_string(&self->target, str, size);
}

static int
avro_resolved_link_reader_grab_string(const avro_value_iface_t *iface,
				      const void *vself, avro_wrapped_buffer_t *dest)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_grab_string(&self->target, dest);
}

static int
avro_resolved_link_reader_get_enum(const avro_value_iface_t *iface,
				   const void *vself, int *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_enum(&self->target, out);
}

static int
avro_resolved_link_reader_get_fixed(const avro_value_iface_t *iface,
				    const void *vself, const void **buf, size_t *size)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_fixed(&self->target, buf, size);
}

static int
avro_resolved_link_reader_grab_fixed(const avro_value_iface_t *iface,
				     const void *vself, avro_wrapped_buffer_t *dest)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_grab_fixed(&self->target, dest);
}
/*
 * Setter delegation for link values: same pattern as the getters —
 * refresh the target instance's copy of the wrapped value, then
 * forward the call to the link target's implementation.
 */
static int
avro_resolved_link_reader_set_boolean(const avro_value_iface_t *iface,
				      void *vself, int val)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_boolean(&self->target, val);
}

static int
avro_resolved_link_reader_set_bytes(const avro_value_iface_t *iface,
				    void *vself, void *buf, size_t size)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_bytes(&self->target, buf, size);
}

static int
avro_resolved_link_reader_give_bytes(const avro_value_iface_t *iface,
				     void *vself, avro_wrapped_buffer_t *buf)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_give_bytes(&self->target, buf);
}

static int
avro_resolved_link_reader_set_double(const avro_value_iface_t *iface,
				     void *vself, double val)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_double(&self->target, val);
}

static int
avro_resolved_link_reader_set_float(const avro_value_iface_t *iface,
				    void *vself, float val)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_float(&self->target, val);
}

static int
avro_resolved_link_reader_set_int(const avro_value_iface_t *iface,
				  void *vself, int32_t val)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_int(&self->target, val);
}

static int
avro_resolved_link_reader_set_long(const avro_value_iface_t *iface,
				   void *vself, int64_t val)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_long(&self->target, val);
}

static int
avro_resolved_link_reader_set_null(const avro_value_iface_t *iface, void *vself)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_null(&self->target);
}

static int
avro_resolved_link_reader_set_string(const avro_value_iface_t *iface,
				     void *vself, const char *str)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_string(&self->target, str);
}

static int
avro_resolved_link_reader_set_string_len(const avro_value_iface_t *iface,
					 void *vself, const char *str, size_t size)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_string_len(&self->target, str, size);
}

static int
avro_resolved_link_reader_give_string_len(const avro_value_iface_t *iface,
					  void *vself, avro_wrapped_buffer_t *buf)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_give_string_len(&self->target, buf);
}

static int
avro_resolved_link_reader_set_enum(const avro_value_iface_t *iface,
				   void *vself, int val)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_enum(&self->target, val);
}

static int
avro_resolved_link_reader_set_fixed(const avro_value_iface_t *iface,
				    void *vself, void *buf, size_t size)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_fixed(&self->target, buf, size);
}

static int
avro_resolved_link_reader_give_fixed(const avro_value_iface_t *iface,
				     void *vself, avro_wrapped_buffer_t *buf)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_give_fixed(&self->target, buf);
}
/*
 * Container/union delegation for link values — same refresh-and-forward
 * pattern as the scalar getters and setters above.
 */
static int
avro_resolved_link_reader_get_size(const avro_value_iface_t *iface,
				   const void *vself, size_t *size)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_size(&self->target, size);
}

static int
avro_resolved_link_reader_get_by_index(const avro_value_iface_t *iface,
				       const void *vself, size_t index,
				       avro_value_t *child, const char **name)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_by_index(&self->target, index, child, name);
}

static int
avro_resolved_link_reader_get_by_name(const avro_value_iface_t *iface,
				      const void *vself, const char *name,
				      avro_value_t *child, size_t *index)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_by_name(&self->target, name, child, index);
}

static int
avro_resolved_link_reader_get_discriminant(const avro_value_iface_t *iface,
					   const void *vself, int *out)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_discriminant(&self->target, out);
}

static int
avro_resolved_link_reader_get_current_branch(const avro_value_iface_t *iface,
					     const void *vself, avro_value_t *branch)
{
	AVRO_UNUSED(iface);
	const avro_resolved_link_value_t *self = (const avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_get_current_branch(&self->target, branch);
}

static int
avro_resolved_link_reader_append(const avro_value_iface_t *iface,
				 void *vself, avro_value_t *child_out,
				 size_t *new_index)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_append(&self->target, child_out, new_index);
}

static int
avro_resolved_link_reader_add(const avro_value_iface_t *iface,
			      void *vself, const char *key,
			      avro_value_t *child, size_t *index, int *is_new)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_add(&self->target, key, child, index, is_new);
}

static int
avro_resolved_link_reader_set_branch(const avro_value_iface_t *iface,
				     void *vself, int discriminant,
				     avro_value_t *branch)
{
	AVRO_UNUSED(iface);
	avro_resolved_link_value_t *self = (avro_resolved_link_value_t *) vself;
	avro_value_t *target_vself = (avro_value_t *) self->target.self;
	*target_vself = self->wrapped;
	return avro_value_set_branch(&self->target, discriminant, branch);
}
/*
 * Allocate a link resolver for the given writer/reader schema pair and
 * install the full suite of delegating value methods.  Takes a fresh
 * reference on both schemas.  Returns NULL on allocation failure.
 *
 * Fixes vs. the previous version: the get_size/get_by_index/get_by_name
 * slots were assigned twice (identically); the duplicates are removed.
 * The avro_new result is also checked before memset.
 */
static avro_resolved_link_reader_t *
avro_resolved_link_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t *self = (avro_resolved_reader_t *) avro_new(avro_resolved_link_reader_t);
	if (self == NULL) {
		return NULL;
	}
	memset(self, 0, sizeof(avro_resolved_link_reader_t));

	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_link_reader_get_type;
	self->parent.get_schema = avro_resolved_link_reader_get_schema;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->free_iface = avro_resolved_link_reader_free_iface;
	self->init = avro_resolved_link_reader_init;
	self->done = avro_resolved_link_reader_done;
	self->reset_wrappers = avro_resolved_link_reader_reset;

	self->parent.get_boolean = avro_resolved_link_reader_get_boolean;
	self->parent.get_bytes = avro_resolved_link_reader_get_bytes;
	self->parent.grab_bytes = avro_resolved_link_reader_grab_bytes;
	self->parent.get_double = avro_resolved_link_reader_get_double;
	self->parent.get_float = avro_resolved_link_reader_get_float;
	self->parent.get_int = avro_resolved_link_reader_get_int;
	self->parent.get_long = avro_resolved_link_reader_get_long;
	self->parent.get_null = avro_resolved_link_reader_get_null;
	self->parent.get_string = avro_resolved_link_reader_get_string;
	self->parent.grab_string = avro_resolved_link_reader_grab_string;
	self->parent.get_enum = avro_resolved_link_reader_get_enum;
	self->parent.get_fixed = avro_resolved_link_reader_get_fixed;
	self->parent.grab_fixed = avro_resolved_link_reader_grab_fixed;

	self->parent.set_boolean = avro_resolved_link_reader_set_boolean;
	self->parent.set_bytes = avro_resolved_link_reader_set_bytes;
	self->parent.give_bytes = avro_resolved_link_reader_give_bytes;
	self->parent.set_double = avro_resolved_link_reader_set_double;
	self->parent.set_float = avro_resolved_link_reader_set_float;
	self->parent.set_int = avro_resolved_link_reader_set_int;
	self->parent.set_long = avro_resolved_link_reader_set_long;
	self->parent.set_null = avro_resolved_link_reader_set_null;
	self->parent.set_string = avro_resolved_link_reader_set_string;
	self->parent.set_string_len = avro_resolved_link_reader_set_string_len;
	self->parent.give_string_len = avro_resolved_link_reader_give_string_len;
	self->parent.set_enum = avro_resolved_link_reader_set_enum;
	self->parent.set_fixed = avro_resolved_link_reader_set_fixed;
	self->parent.give_fixed = avro_resolved_link_reader_give_fixed;

	self->parent.get_size = avro_resolved_link_reader_get_size;
	self->parent.get_by_index = avro_resolved_link_reader_get_by_index;
	self->parent.get_by_name = avro_resolved_link_reader_get_by_name;
	self->parent.get_discriminant = avro_resolved_link_reader_get_discriminant;
	self->parent.get_current_branch = avro_resolved_link_reader_get_current_branch;
	self->parent.append = avro_resolved_link_reader_append;
	self->parent.add = avro_resolved_link_reader_add;
	self->parent.set_branch = avro_resolved_link_reader_set_branch;

	return container_of(self, avro_resolved_link_reader_t, parent);
}
/*
 * Builds a resolver for a writer-side link schema.  The link resolver
 * is memoized *before* resolving the target so that recursive schemas
 * terminate; on failure the memoization is rolled back.
 */
static avro_resolved_reader_t *
try_wlink(memoize_state_t *state,
	  avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * For link schemas, we create a special value implementation
	 * that allocates space for its wrapped value at runtime.  This
	 * lets us handle recursive types without having to instantiate
	 * an infinite-size value.
	 */

	/* (The old AVRO_UNUSED(rschema) was incorrect — rschema is used
	 * throughout this function.) */
	avro_schema_t wtarget = avro_schema_link_target(wschema);
	avro_resolved_link_reader_t *lself =
	    avro_resolved_link_reader_create(wtarget, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, lself);

	avro_resolved_reader_t *target_resolver =
	    avro_resolved_reader_new_memoized(state, wtarget, rschema);
	if (target_resolver == NULL) {
		avro_memoize_delete(&state->mem, wschema, rschema);
		avro_value_iface_decref(&lself->parent.parent);
		avro_prefix_error("Link target isn't compatible: ");
		AVRO_DEBUG("%s", avro_strerror());
		return NULL;
	}

	lself->parent.calculate_size = avro_resolved_wlink_reader_calculate_size;
	lself->target_resolver = target_resolver;
	lself->next = state->links;
	state->links = lself;

	return &lself->parent;
}
/*
 * Builds a resolver for a reader-side link schema.  Mirrors try_wlink,
 * but follows the reader schema's link target instead.
 */
static avro_resolved_reader_t *
try_rlink(memoize_state_t *state,
	  avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * For link schemas, we create a special value implementation
	 * that allocates space for its wrapped value at runtime.  This
	 * lets us handle recursive types without having to instantiate
	 * an infinite-size value.
	 */

	/* (The old AVRO_UNUSED(rschema) was incorrect — rschema is used
	 * on the very next line.) */
	avro_schema_t rtarget = avro_schema_link_target(rschema);
	avro_resolved_link_reader_t *lself =
	    avro_resolved_link_reader_create(wschema, rtarget);
	avro_memoize_set(&state->mem, wschema, rschema, lself);

	avro_resolved_reader_t *target_resolver =
	    avro_resolved_reader_new_memoized(state, wschema, rtarget);
	if (target_resolver == NULL) {
		avro_memoize_delete(&state->mem, wschema, rschema);
		avro_value_iface_decref(&lself->parent.parent);
		avro_prefix_error("Link target isn't compatible: ");
		AVRO_DEBUG("%s", avro_strerror());
		return NULL;
	}

	lself->parent.calculate_size = avro_resolved_rlink_reader_calculate_size;
	lself->target_resolver = target_resolver;
	lself->next = state->links;
	state->links = lself;

	return &lself->parent;
}
/*-----------------------------------------------------------------------
* boolean
*/
/* Reads a boolean straight out of the wrapped writer value. */
static int
avro_resolved_reader_get_boolean(const avro_value_iface_t *viface,
				 const void *vself, int *val)
{
	const avro_value_t *source = (const avro_value_t *) vself;
	AVRO_UNUSED(viface);
	AVRO_DEBUG("Getting boolean from %p", source->self);
	return avro_value_get_boolean(source, val);
}
/*
 * Resolves a reader boolean: only a writer boolean is compatible.
 * Returns NULL (with the error set) for any other writer type.
 */
static avro_resolved_reader_t *
try_boolean(memoize_state_t *state,
	    avro_schema_t wschema, avro_schema_t rschema)
{
	if (!is_avro_boolean(wschema)) {
		avro_set_error("Writer %s not compatible with reader boolean",
			       avro_schema_type_name(wschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_boolean = avro_resolved_reader_get_boolean;
	return self;
}
/*-----------------------------------------------------------------------
* bytes
*/
/* Reads a bytes buffer straight out of the wrapped writer value. */
static int
avro_resolved_reader_get_bytes(const avro_value_iface_t *viface,
			       const void *vself, const void **buf, size_t *size)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Getting bytes from %p", src->self);
	return avro_value_get_bytes(src, buf, size);
}

/* Grabs (wraps) the bytes buffer from the wrapped writer value. */
static int
avro_resolved_reader_grab_bytes(const avro_value_iface_t *viface,
				const void *vself, avro_wrapped_buffer_t *dest)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Grabbing bytes from %p", src->self);
	return avro_value_grab_bytes(src, dest);
}
/*
 * Resolves a reader bytes: only a writer bytes is compatible.
 * Returns NULL (with the error set) for any other writer type.
 */
static avro_resolved_reader_t *
try_bytes(memoize_state_t *state,
	  avro_schema_t wschema, avro_schema_t rschema)
{
	if (!is_avro_bytes(wschema)) {
		avro_set_error("Writer %s not compatible with reader bytes",
			       avro_schema_type_name(wschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_bytes = avro_resolved_reader_get_bytes;
	self->parent.grab_bytes = avro_resolved_reader_grab_bytes;
	return self;
}
/*-----------------------------------------------------------------------
* double
*/
/*
 * double getters: a direct read, plus Avro's promotions from float,
 * int, and long writer values.
 */
static int
avro_resolved_reader_get_double(const avro_value_iface_t *viface,
				const void *vself, double *val)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Getting double from %p", src->self);
	return avro_value_get_double(src, val);
}

static int
avro_resolved_reader_get_double_float(const avro_value_iface_t *viface,
				      const void *vself, double *val)
{
	int rval;
	float real_val;
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Promoting double from float %p", src->self);
	check(rval, avro_value_get_float(src, &real_val));
	*val = real_val;
	return 0;
}

static int
avro_resolved_reader_get_double_int(const avro_value_iface_t *viface,
				    const void *vself, double *val)
{
	int rval;
	int32_t real_val;
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Promoting double from int %p", src->self);
	check(rval, avro_value_get_int(src, &real_val));
	*val = real_val;
	return 0;
}

static int
avro_resolved_reader_get_double_long(const avro_value_iface_t *viface,
				     const void *vself, double *val)
{
	int rval;
	int64_t real_val;
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Promoting double from long %p", src->self);
	check(rval, avro_value_get_long(src, &real_val));
	/* Explicit cast: long -> double can lose precision above 2^53. */
	*val = (double) real_val;
	return 0;
}
/*
 * Resolves a reader double.  Compatible writer types are double itself
 * plus float, int, and long (Avro promotion rules); each maps to its
 * own getter.
 */
static avro_resolved_reader_t *
try_double(memoize_state_t *state,
	   avro_schema_t wschema, avro_schema_t rschema)
{
	int (*getter)(const avro_value_iface_t *, const void *, double *);

	if (is_avro_double(wschema)) {
		getter = avro_resolved_reader_get_double;
	} else if (is_avro_float(wschema)) {
		getter = avro_resolved_reader_get_double_float;
	} else if (is_avro_int32(wschema)) {
		getter = avro_resolved_reader_get_double_int;
	} else if (is_avro_int64(wschema)) {
		getter = avro_resolved_reader_get_double_long;
	} else {
		avro_set_error("Writer %s not compatible with reader double",
			       avro_schema_type_name(wschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_double = getter;
	return self;
}
/*-----------------------------------------------------------------------
* float
*/
/*
 * float getters: a direct read, plus Avro's promotions from int and
 * long writer values.
 */
static int
avro_resolved_reader_get_float(const avro_value_iface_t *viface,
			       const void *vself, float *val)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Getting float from %p", src->self);
	return avro_value_get_float(src, val);
}

static int
avro_resolved_reader_get_float_int(const avro_value_iface_t *viface,
				   const void *vself, float *val)
{
	int rval;
	int32_t real_val;
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Promoting float from int %p", src->self);
	check(rval, avro_value_get_int(src, &real_val));
	/* Explicit cast: int -> float can lose precision above 2^24. */
	*val = (float) real_val;
	return 0;
}

static int
avro_resolved_reader_get_float_long(const avro_value_iface_t *viface,
				    const void *vself, float *val)
{
	int rval;
	int64_t real_val;
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Promoting float from long %p", src->self);
	check(rval, avro_value_get_long(src, &real_val));
	*val = (float) real_val;
	return 0;
}
/*
 * Resolves a reader float.  Compatible writer types are float itself
 * plus int and long (Avro promotion rules); each maps to its own
 * getter.
 */
static avro_resolved_reader_t *
try_float(memoize_state_t *state,
	  avro_schema_t wschema, avro_schema_t rschema)
{
	int (*getter)(const avro_value_iface_t *, const void *, float *);

	if (is_avro_float(wschema)) {
		getter = avro_resolved_reader_get_float;
	} else if (is_avro_int32(wschema)) {
		getter = avro_resolved_reader_get_float_int;
	} else if (is_avro_int64(wschema)) {
		getter = avro_resolved_reader_get_float_long;
	} else {
		avro_set_error("Writer %s not compatible with reader float",
			       avro_schema_type_name(wschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_float = getter;
	return self;
}
/*-----------------------------------------------------------------------
* int
*/
/* Reads an int straight out of the wrapped writer value. */
static int
avro_resolved_reader_get_int(const avro_value_iface_t *viface,
			     const void *vself, int32_t *val)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Getting int from %p", src->self);
	return avro_value_get_int(src, val);
}

/*
 * Resolves a reader int: only a writer int is compatible (there is no
 * demotion from long).
 */
static avro_resolved_reader_t *
try_int(memoize_state_t *state,
	avro_schema_t wschema, avro_schema_t rschema)
{
	if (is_avro_int32(wschema)) {
		avro_resolved_reader_t *self =
		    avro_resolved_reader_create(wschema, rschema);
		avro_memoize_set(&state->mem, wschema, rschema, self);
		self->parent.get_int = avro_resolved_reader_get_int;
		return self;
	}

	avro_set_error("Writer %s not compatible with reader int",
		       avro_schema_type_name(wschema));
	return NULL;
}
/*-----------------------------------------------------------------------
* long
*/
/* Reads a long straight out of the wrapped writer value. */
static int
avro_resolved_reader_get_long(const avro_value_iface_t *viface,
			      const void *vself, int64_t *val)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Getting long from %p", src->self);
	return avro_value_get_long(src, val);
}

/* Promotes an int writer value to a reader long (always lossless). */
static int
avro_resolved_reader_get_long_int(const avro_value_iface_t *viface,
				  const void *vself, int64_t *val)
{
	int rval;
	int32_t real_val;
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	AVRO_DEBUG("Promoting long from int %p", src->self);
	check(rval, avro_value_get_int(src, &real_val));
	*val = real_val;
	return 0;
}

/*
 * Resolves a reader long: a writer long is read directly, and a writer
 * int is promoted.
 */
static avro_resolved_reader_t *
try_long(memoize_state_t *state,
	 avro_schema_t wschema, avro_schema_t rschema)
{
	if (is_avro_int64(wschema)) {
		avro_resolved_reader_t *self =
		    avro_resolved_reader_create(wschema, rschema);
		avro_memoize_set(&state->mem, wschema, rschema, self);
		self->parent.get_long = avro_resolved_reader_get_long;
		return self;
	}
	else if (is_avro_int32(wschema)) {
		avro_resolved_reader_t *self =
		    avro_resolved_reader_create(wschema, rschema);
		avro_memoize_set(&state->mem, wschema, rschema, self);
		self->parent.get_long = avro_resolved_reader_get_long_int;
		return self;
	}

	avro_set_error("Writer %s not compatible with reader long",
		       avro_schema_type_name(wschema));
	return NULL;
}
/*-----------------------------------------------------------------------
* null
*/
/*
 * Read a null out of the wrapped writer value, delegating directly to
 * the underlying value implementation.
 */
static int
avro_resolved_reader_get_null(const avro_value_iface_t *viface,
			      const void *vself)
{
	const avro_value_t *source = (const avro_value_t *) vself;
	AVRO_UNUSED(viface);
	AVRO_DEBUG("Getting null from %p", source->self);
	return avro_value_get_null(source);
}
/*
 * Build a resolver for a reader "null" schema; only a writer null is
 * compatible.
 */
static avro_resolved_reader_t *
try_null(memoize_state_t *state,
	 avro_schema_t wschema, avro_schema_t rschema)
{
	if (!is_avro_null(wschema)) {
		avro_set_error("Writer %s not compatible with reader null",
			       avro_schema_type_name(wschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_null = avro_resolved_reader_get_null;
	return self;
}
/*-----------------------------------------------------------------------
* string
*/
/*
 * Read a string out of the wrapped writer value, delegating directly to
 * the underlying value implementation.
 */
static int
avro_resolved_reader_get_string(const avro_value_iface_t *viface,
				const void *vself, const char **str, size_t *size)
{
	const avro_value_t *source = (const avro_value_t *) vself;
	AVRO_UNUSED(viface);
	AVRO_DEBUG("Getting string from %p", source->self);
	return avro_value_get_string(source, str, size);
}
/*
 * Grab (take a reference to) the string buffer of the wrapped writer
 * value, delegating directly to the underlying value implementation.
 */
static int
avro_resolved_reader_grab_string(const avro_value_iface_t *viface,
				 const void *vself, avro_wrapped_buffer_t *dest)
{
	const avro_value_t *source = (const avro_value_t *) vself;
	AVRO_UNUSED(viface);
	AVRO_DEBUG("Grabbing string from %p", source->self);
	return avro_value_grab_string(source, dest);
}
/*
 * Build a resolver for a reader "string" schema; only a writer string
 * is compatible.
 */
static avro_resolved_reader_t *
try_string(memoize_state_t *state,
	   avro_schema_t wschema, avro_schema_t rschema)
{
	if (!is_avro_string(wschema)) {
		avro_set_error("Writer %s not compatible with reader string",
			       avro_schema_type_name(wschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_string = avro_resolved_reader_get_string;
	self->parent.grab_string = avro_resolved_reader_grab_string;
	return self;
}
/*-----------------------------------------------------------------------
* array
*/
/* Resolver interface for array schemas: extends the base resolved
 * reader with a resolver for the array's element type. */
typedef struct avro_resolved_array_reader {
	avro_resolved_reader_t parent;
	/* Resolver used for every element of the array. */
	avro_resolved_reader_t *child_resolver;
} avro_resolved_array_reader_t;

/* Per-value state for a resolved array reader. */
typedef struct avro_resolved_array_value {
	/* The writer array value being wrapped. */
	avro_value_t wrapped;
	/* Cache of resolved child values, one per element accessed so
	 * far (element size is child_resolver->instance_size). */
	avro_raw_array_t children;
} avro_resolved_array_value_t;
/*
 * Compute the instance size for an array resolver and, recursively, for
 * its element resolver.
 */
static void
avro_resolved_array_reader_calculate_size(avro_resolved_reader_t *iface)
{
	avro_resolved_array_reader_t *aiface =
	    container_of(iface, avro_resolved_array_reader_t, parent);

	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));
	/* The array value itself is fixed-size; element storage lives in
	 * the separately-allocated children array. */
	iface->instance_size = sizeof(avro_resolved_array_value_t);

	avro_resolved_reader_calculate_size(aiface->child_resolver);
}
/*
 * Free an array resolver: release the element resolver (via the
 * cycle-aware free_resolver), drop the schema references, and free the
 * interface struct itself.
 */
static void
avro_resolved_array_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	avro_resolved_array_reader_t *aiface =
	    container_of(iface, avro_resolved_array_reader_t, parent);
	free_resolver(aiface->child_resolver, freeing);
	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_array_reader_t, iface);
}
/*
 * Initialize the per-value state of an array resolver: an (initially
 * empty) raw array sized to hold the element resolver's instances.
 */
static int
avro_resolved_array_reader_init(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_array_reader_t *aiface =
	    container_of(iface, avro_resolved_array_reader_t, parent);
	avro_resolved_array_value_t *self = (avro_resolved_array_value_t *) vself;
	size_t child_instance_size = aiface->child_resolver->instance_size;
	AVRO_DEBUG("Initializing child array (child_size=%" PRIsz ")", child_instance_size);
	avro_raw_array_init(&self->children, child_instance_size);
	return 0;
}
/*
 * Finalize every cached child value in an array resolver's children
 * cache (the raw array's storage itself is not released here).
 */
static void
avro_resolved_array_reader_free_elements(const avro_resolved_reader_t *child_iface,
					 avro_resolved_array_value_t *self)
{
	size_t count = avro_raw_array_size(&self->children);
	size_t idx;
	for (idx = 0; idx < count; idx++) {
		avro_resolved_reader_done
		    (child_iface, avro_raw_array_get_raw(&self->children, idx));
	}
}
/*
 * Tear down the per-value state of an array resolver: finalize each
 * cached child, then release the children array's storage.
 */
static void
avro_resolved_array_reader_done(const avro_resolved_reader_t *iface, void *vself)
{
	avro_resolved_array_value_t *self = (avro_resolved_array_value_t *) vself;
	const avro_resolved_array_reader_t *aiface =
	    container_of(iface, avro_resolved_array_reader_t, parent);
	avro_resolved_array_reader_free_elements(aiface->child_resolver, self);
	avro_raw_array_done(&self->children);
}
/*
 * Reset the per-value state of an array resolver so the value can be
 * reused: finalize and discard all cached child wrappers.
 */
static int
avro_resolved_array_reader_reset(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_array_reader_t *aiface =
	    container_of(iface, avro_resolved_array_reader_t, parent);
	avro_resolved_array_value_t *self = (avro_resolved_array_value_t *) vself;

	/* Clear out our cache of wrapped children */
	avro_resolved_array_reader_free_elements(aiface->child_resolver, self);
	avro_raw_array_clear(&self->children);
	return 0;
}
/*
 * Report the element count of the wrapped writer array.
 */
static int
avro_resolved_array_reader_get_size(const avro_value_iface_t *viface,
				    const void *vself, size_t *size)
{
	const avro_resolved_array_value_t *self =
	    (const avro_resolved_array_value_t *) vself;
	AVRO_UNUSED(viface);
	return avro_value_get_size(&self->wrapped, size);
}
/*
 * Fetch element `index` of the wrapped writer array.  The returned
 * child value is backed by a cached wrapper stored in self->children;
 * the cache is grown (and newly exposed slots initialized) on demand.
 */
static int
avro_resolved_array_reader_get_by_index(const avro_value_iface_t *viface,
					const void *vself, size_t index,
					avro_value_t *child, const char **name)
{
	int rval;
	size_t old_size;
	size_t new_size;
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_array_reader_t *aiface =
	    container_of(iface, avro_resolved_array_reader_t, parent);
	avro_resolved_array_value_t *self = (avro_resolved_array_value_t *) vself;

	/*
	 * Ensure that our child wrapper array is big enough to hold
	 * this many elements.
	 */
	new_size = index + 1;
	check(rval, avro_raw_array_ensure_size0(&self->children, new_size));

	/* Initialize any child slots that growing the cache just
	 * exposed, then record the new logical size. */
	old_size = avro_raw_array_size(&self->children);
	if (old_size <= index) {
		size_t i;
		for (i = old_size; i < new_size; i++) {
			check(rval, avro_resolved_reader_init
			      (aiface->child_resolver,
			       avro_raw_array_get_raw(&self->children, i)));
		}
		/* avro_raw_array_size expands to an lvalue, so this
		 * assignment updates the array's stored count. */
		avro_raw_array_size(&self->children) = index+1;
	}

	/* Hand back a child value backed by our cached wrapper, and ask
	 * the wrapped writer array to fill in that wrapper. */
	child->iface = &aiface->child_resolver->parent;
	child->self = avro_raw_array_get_raw(&self->children, index);
	AVRO_DEBUG("Getting element %" PRIsz " from array %p", index, self->wrapped.self);
	return avro_value_get_by_index(&self->wrapped, index, (avro_value_t *) child->self, name);
}
/*
 * Allocate and wire up a new array resolver for the given writer/reader
 * schema pair.  The child_resolver field is left NULL; the caller
 * (try_array) fills it in after resolving the element schemas.
 * Returns a resolver with refcount 1; caller owns the reference.
 */
static avro_resolved_array_reader_t *
avro_resolved_array_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t *self = (avro_resolved_reader_t *) avro_new(avro_resolved_array_reader_t);
	memset(self, 0, sizeof(avro_resolved_array_reader_t));

	/* Generic avro_value_iface_t entry points shared by all
	 * resolved readers. */
	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_reader_get_type;
	self->parent.get_schema = avro_resolved_reader_get_schema;

	/* Array-specific accessors. */
	self->parent.get_size = avro_resolved_array_reader_get_size;
	self->parent.get_by_index = avro_resolved_array_reader_get_by_index;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->calculate_size = avro_resolved_array_reader_calculate_size;
	self->free_iface = avro_resolved_array_reader_free_iface;
	self->init = avro_resolved_array_reader_init;
	self->done = avro_resolved_array_reader_done;
	self->reset_wrappers = avro_resolved_array_reader_reset;
	return container_of(self, avro_resolved_array_reader_t, parent);
}
/*
 * Build a resolver for a reader "array" schema.  Returns NULL (with the
 * Avro error set) if the writer isn't an array or the element schemas
 * aren't compatible.
 */
static avro_resolved_reader_t *
try_array(memoize_state_t *state,
	  avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * First verify that the writer is an array.
	 */
	if (!is_avro_array(wschema)) {
		return 0;
	}

	/*
	 * Array schemas have to have compatible element schemas to be
	 * compatible themselves.  Try to create an resolver to check
	 * the compatibility.
	 */
	/* Memoize before recursing so that recursive schemas terminate. */
	avro_resolved_array_reader_t *aself =
	    avro_resolved_array_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, aself);

	avro_schema_t witems = avro_schema_array_items(wschema);
	avro_schema_t ritems = avro_schema_array_items(rschema);

	avro_resolved_reader_t *item_resolver =
	    avro_resolved_reader_new_memoized(state, witems, ritems);
	if (item_resolver == NULL) {
		/* Roll back the memoization and drop our reference. */
		avro_memoize_delete(&state->mem, wschema, rschema);
		avro_value_iface_decref(&aself->parent.parent);
		avro_prefix_error("Array values aren't compatible: ");
		return NULL;
	}

	/*
	 * The two schemas are compatible.  Store the item schema's
	 * resolver into the child_resolver field.
	 */
	aself->child_resolver = item_resolver;
	return &aself->parent;
}
/*-----------------------------------------------------------------------
* enum
*/
/*
 * Read an enum value out of the wrapped writer value, delegating
 * directly to the underlying value implementation.
 */
static int
avro_resolved_reader_get_enum(const avro_value_iface_t *viface,
			      const void *vself, int *val)
{
	const avro_value_t *source = (const avro_value_t *) vself;
	AVRO_UNUSED(viface);
	AVRO_DEBUG("Getting enum from %p", source->self);
	return avro_value_get_enum(source, val);
}
/*
 * Build a resolver for a reader "enum" schema.
 *
 * Enum schemas have to have the same name — but not the same list of
 * symbols — to be compatible.
 */
static avro_resolved_reader_t *
try_enum(memoize_state_t *state,
	 avro_schema_t wschema, avro_schema_t rschema)
{
	if (is_avro_enum(wschema) &&
	    strcmp(avro_schema_name(wschema), avro_schema_name(rschema)) == 0) {
		avro_resolved_reader_t *self =
		    avro_resolved_reader_create(wschema, rschema);
		avro_memoize_set(&state->mem, wschema, rschema, self);
		self->parent.get_enum = avro_resolved_reader_get_enum;
		return self;
	}

	avro_set_error("Writer %s not compatible with reader %s",
		       avro_schema_type_name(wschema),
		       avro_schema_type_name(rschema));
	return NULL;
}
/*-----------------------------------------------------------------------
* fixed
*/
/*
 * Read the contents of the wrapped writer value's fixed, delegating
 * directly to the underlying value implementation.
 */
static int
avro_resolved_reader_get_fixed(const avro_value_iface_t *viface,
			       const void *vself, const void **buf, size_t *size)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	/* Log the wrapped value's own self pointer, consistent with the
	 * other pass-through getters (which log src->self, not vself). */
	AVRO_DEBUG("Getting fixed from %p", src->self);
	return avro_value_get_fixed(src, buf, size);
}
/*
 * Grab (take a reference to) the fixed buffer of the wrapped writer
 * value, delegating directly to the underlying value implementation.
 */
static int
avro_resolved_reader_grab_fixed(const avro_value_iface_t *viface,
				const void *vself, avro_wrapped_buffer_t *dest)
{
	AVRO_UNUSED(viface);
	const avro_value_t *src = (const avro_value_t *) vself;
	/* Log the wrapped value's own self pointer, consistent with the
	 * other pass-through getters (which log src->self, not vself). */
	AVRO_DEBUG("Grabbing fixed from %p", src->self);
	return avro_value_grab_fixed(src, dest);
}
/*
 * Build a resolver for a reader "fixed" schema.
 *
 * Fixed schemas need the same name and size to be compatible.
 */
static avro_resolved_reader_t *
try_fixed(memoize_state_t *state,
	  avro_schema_t wschema, avro_schema_t rschema)
{
	if (!avro_schema_equal(wschema, rschema)) {
		avro_set_error("Writer %s not compatible with reader %s",
			       avro_schema_type_name(wschema),
			       avro_schema_type_name(rschema));
		return NULL;
	}

	avro_resolved_reader_t *self =
	    avro_resolved_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, self);
	self->parent.get_fixed = avro_resolved_reader_get_fixed;
	self->parent.grab_fixed = avro_resolved_reader_grab_fixed;
	return self;
}
/*-----------------------------------------------------------------------
* map
*/
/* Resolver interface for map schemas: extends the base resolved reader
 * with a resolver for the map's value type. */
typedef struct avro_resolved_map_reader {
	avro_resolved_reader_t parent;
	/* Resolver used for every value of the map. */
	avro_resolved_reader_t *child_resolver;
} avro_resolved_map_reader_t;

/* Per-value state for a resolved map reader. */
typedef struct avro_resolved_map_value {
	/* The writer map value being wrapped. */
	avro_value_t wrapped;
	/* Cache of resolved child values, indexed by map entry index. */
	avro_raw_array_t children;
} avro_resolved_map_value_t;
/*
 * Compute the instance size for a map resolver and, recursively, for
 * its value resolver.
 */
static void
avro_resolved_map_reader_calculate_size(avro_resolved_reader_t *iface)
{
	avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);

	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));
	/* The map value itself is fixed-size; per-entry storage lives in
	 * the separately-allocated children array. */
	iface->instance_size = sizeof(avro_resolved_map_value_t);

	avro_resolved_reader_calculate_size(miface->child_resolver);
}
/*
 * Free a map resolver: release the value resolver (via the cycle-aware
 * free_resolver), drop the schema references, and free the interface
 * struct itself.
 */
static void
avro_resolved_map_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);
	free_resolver(miface->child_resolver, freeing);
	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_map_reader_t, iface);
}
/*
 * Initialize the per-value state of a map resolver: an (initially
 * empty) raw array sized to hold the value resolver's instances.
 */
static int
avro_resolved_map_reader_init(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);
	avro_resolved_map_value_t *self = (avro_resolved_map_value_t *) vself;
	size_t child_instance_size = miface->child_resolver->instance_size;
	AVRO_DEBUG("Initializing child array for map (child_size=%" PRIsz ")", child_instance_size);
	avro_raw_array_init(&self->children, child_instance_size);
	return 0;
}
/*
 * Finalize every cached child value in a map resolver's children cache
 * (the raw array's storage itself is not released here).
 */
static void
avro_resolved_map_reader_free_elements(const avro_resolved_reader_t *child_iface,
				       avro_resolved_map_value_t *self)
{
	size_t count = avro_raw_array_size(&self->children);
	size_t idx;
	for (idx = 0; idx < count; idx++) {
		avro_resolved_reader_done
		    (child_iface, avro_raw_array_get_raw(&self->children, idx));
	}
}
/*
 * Tear down the per-value state of a map resolver: finalize each cached
 * child, then release the children array's storage.
 */
static void
avro_resolved_map_reader_done(const avro_resolved_reader_t *iface, void *vself)
{
	avro_resolved_map_value_t *self = (avro_resolved_map_value_t *) vself;
	const avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);
	avro_resolved_map_reader_free_elements(miface->child_resolver, self);
	avro_raw_array_done(&self->children);
}
/*
 * Reset the per-value state of a map resolver so the value can be
 * reused: finalize all cached child wrappers.
 *
 * NOTE(review): unlike the array variant, this does not call
 * avro_raw_array_clear() after freeing the elements, so the children
 * array's recorded size is left unchanged — confirm whether that is
 * intentional (the elements are re-stashed in place on the next
 * get_by_index/get_by_name) or an omission.
 */
static int
avro_resolved_map_reader_reset(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);
	avro_resolved_map_value_t *self = (avro_resolved_map_value_t *) vself;

	/* Clear out our cache of wrapped children */
	avro_resolved_map_reader_free_elements(miface->child_resolver, self);
	return 0;
}
/*
 * Report the entry count of the wrapped writer map.
 */
static int
avro_resolved_map_reader_get_size(const avro_value_iface_t *viface,
				  const void *vself, size_t *size)
{
	const avro_resolved_map_value_t *self =
	    (const avro_resolved_map_value_t *) vself;
	AVRO_UNUSED(viface);
	/* The wrapped writer value is the first field of the map value
	 * struct, so this is the same object the original raw cast saw. */
	return avro_value_get_size(&self->wrapped, size);
}
/*
 * Fetch entry `index` of the wrapped writer map.  The returned child
 * value is backed by a cached wrapper stored in self->children, grown
 * on demand.
 *
 * NOTE(review): unlike the array variant, newly exposed child slots are
 * not passed through avro_resolved_reader_init here — presumably because
 * map children only ever hold the wrapped avro_value_t; confirm against
 * the child resolver implementations.
 */
static int
avro_resolved_map_reader_get_by_index(const avro_value_iface_t *viface,
				      const void *vself, size_t index,
				      avro_value_t *child, const char **name)
{
	int rval;
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);
	avro_resolved_map_value_t *self = (avro_resolved_map_value_t *) vself;

	/*
	 * Ensure that our child wrapper array is big enough to hold
	 * this many elements.
	 */
	check(rval, avro_raw_array_ensure_size0(&self->children, index+1));
	if (avro_raw_array_size(&self->children) <= index) {
		/* avro_raw_array_size expands to an lvalue; bump the
		 * recorded element count. */
		avro_raw_array_size(&self->children) = index+1;
	}

	child->iface = &miface->child_resolver->parent;
	child->self = avro_raw_array_get_raw(&self->children, index);
	AVRO_DEBUG("Getting element %" PRIsz " from map %p", index, self->wrapped.self);
	return avro_value_get_by_index(&self->wrapped, index, (avro_value_t *) child->self, name);
}
/*
 * Fetch the entry with key `name` from the wrapped writer map.  On
 * success `child` is backed by our cached wrapper and, if `index` is
 * non-NULL, it receives the entry's index in the writer map.
 */
static int
avro_resolved_map_reader_get_by_name(const avro_value_iface_t *viface,
				     const void *vself, const char *name,
				     avro_value_t *child, size_t *index)
{
	int rval;
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_map_reader_t *miface =
	    container_of(iface, avro_resolved_map_reader_t, parent);
	avro_resolved_map_value_t *self = (avro_resolved_map_value_t *) vself;

	/*
	 * This is a bit convoluted.  We need to stash the wrapped child
	 * value somewhere in our children array.  But we don't know
	 * where to put it until the wrapped map tells us what its index
	 * is.
	 */
	avro_value_t real_child;
	size_t real_index;

	AVRO_DEBUG("Getting element %s from map %p", name, self->wrapped.self);
	check(rval, avro_value_get_by_name
	      (&self->wrapped, name, &real_child, &real_index));

	/*
	 * Ensure that our child wrapper array is big enough to hold
	 * this many elements.
	 */
	check(rval, avro_raw_array_ensure_size0(&self->children, real_index+1));
	if (avro_raw_array_size(&self->children) <= real_index) {
		/* avro_raw_array_size expands to an lvalue; bump the
		 * recorded element count. */
		avro_raw_array_size(&self->children) = real_index+1;
	}

	/* Stash the wrapped child into its slot and point the caller's
	 * child value at it. */
	child->iface = &miface->child_resolver->parent;
	child->self = avro_raw_array_get_raw(&self->children, real_index);

	avro_value_t *child_vself = (avro_value_t *) child->self;
	*child_vself = real_child;

	if (index != NULL) {
		*index = real_index;
	}
	return 0;
}
/*
 * Allocate and wire up a new map resolver for the given writer/reader
 * schema pair.  The child_resolver field is left NULL; the caller
 * (try_map) fills it in after resolving the value schemas.  Returns a
 * resolver with refcount 1; caller owns the reference.
 */
static avro_resolved_map_reader_t *
avro_resolved_map_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t *self = (avro_resolved_reader_t *) avro_new(avro_resolved_map_reader_t);
	memset(self, 0, sizeof(avro_resolved_map_reader_t));

	/* Generic avro_value_iface_t entry points shared by all
	 * resolved readers. */
	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_reader_get_type;
	self->parent.get_schema = avro_resolved_reader_get_schema;

	/* Map-specific accessors. */
	self->parent.get_size = avro_resolved_map_reader_get_size;
	self->parent.get_by_index = avro_resolved_map_reader_get_by_index;
	self->parent.get_by_name = avro_resolved_map_reader_get_by_name;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->calculate_size = avro_resolved_map_reader_calculate_size;
	self->free_iface = avro_resolved_map_reader_free_iface;
	self->init = avro_resolved_map_reader_init;
	self->done = avro_resolved_map_reader_done;
	self->reset_wrappers = avro_resolved_map_reader_reset;
	return container_of(self, avro_resolved_map_reader_t, parent);
}
/*
 * Build a resolver for a reader "map" schema.  Returns NULL (with the
 * Avro error set) if the writer isn't a map or the value schemas aren't
 * compatible.
 */
static avro_resolved_reader_t *
try_map(memoize_state_t *state,
	avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * First verify that the writer is a map.
	 */
	if (!is_avro_map(wschema)) {
		return 0;
	}

	/*
	 * Map schemas have to have compatible element schemas to be
	 * compatible themselves.  Try to create an resolver to check
	 * the compatibility.
	 */
	/* Memoize before recursing so that recursive schemas terminate. */
	avro_resolved_map_reader_t *mself =
	    avro_resolved_map_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, mself);

	avro_schema_t witems = avro_schema_map_values(wschema);
	avro_schema_t ritems = avro_schema_map_values(rschema);

	avro_resolved_reader_t *item_resolver =
	    avro_resolved_reader_new_memoized(state, witems, ritems);
	if (item_resolver == NULL) {
		/* Roll back the memoization and drop our reference. */
		avro_memoize_delete(&state->mem, wschema, rschema);
		avro_value_iface_decref(&mself->parent.parent);
		avro_prefix_error("Map values aren't compatible: ");
		return NULL;
	}

	/*
	 * The two schemas are compatible.  Store the item schema's
	 * resolver into the child_resolver field.
	 */
	mself->child_resolver = item_resolver;
	return &mself->parent;
}
/*-----------------------------------------------------------------------
* record
*/
/* Resolver interface for record schemas.  The arrays below all have
 * field_count entries, indexed by READER field index. */
typedef struct avro_resolved_record_reader {
	avro_resolved_reader_t parent;
	/* Number of fields in the reader schema. */
	size_t field_count;
	/* Byte offset of each field's storage within the record value. */
	size_t *field_offsets;
	/* Resolver per reader field; NULL if the field isn't matched in
	 * the writer (currently an error — see try_record). */
	avro_resolved_reader_t **field_resolvers;
	/* Writer field index for each reader field. */
	size_t *index_mapping;
} avro_resolved_record_reader_t;

/* Per-value state for a resolved record reader. */
typedef struct avro_resolved_record_value {
	avro_value_t wrapped;

	/* The rest of the struct is taken up by the inline storage
	 * needed for each field. */
} avro_resolved_record_value_t;

/** Return a pointer to the given field within a record struct. */
#define avro_resolved_record_field(iface, rec, index) \
	(((char *) (rec)) + (iface)->field_offsets[(index)])
/*
 * Compute the instance size of a record resolver: the fixed header plus
 * the (recursively calculated) instance size of every matched field,
 * laid out inline one after another.  Also records each field's offset.
 */
static void
avro_resolved_record_reader_calculate_size(avro_resolved_reader_t *iface)
{
	avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);

	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));

	/*
	 * Once we've figured out which reader fields we actually need,
	 * calculate an offset for each one.
	 */
	size_t ri;
	size_t next_offset = sizeof(avro_resolved_record_value_t);
	for (ri = 0; ri < riface->field_count; ri++) {
		riface->field_offsets[ri] = next_offset;
		if (riface->field_resolvers[ri] != NULL) {
			avro_resolved_reader_calculate_size
			    (riface->field_resolvers[ri]);
			size_t field_size =
			    riface->field_resolvers[ri]->instance_size;
			AVRO_DEBUG("Field %" PRIsz " has size %" PRIsz, ri, field_size);
			next_offset += field_size;
		} else {
			/* Skipped fields keep the current offset and
			 * occupy no storage. */
			AVRO_DEBUG("Field %" PRIsz " is being skipped", ri);
		}
	}

	AVRO_DEBUG("Record has size %" PRIsz, next_offset);
	iface->instance_size = next_offset;
}
/*
 * Free a record resolver: release each field resolver (via the
 * cycle-aware free_resolver), the bookkeeping arrays, the schema
 * references, and the interface struct itself.  All arrays may be NULL
 * if try_record failed before assigning them.
 */
static void
avro_resolved_record_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);

	if (riface->field_offsets != NULL) {
		avro_free(riface->field_offsets,
			  riface->field_count * sizeof(size_t));
	}

	if (riface->field_resolvers != NULL) {
		size_t i;
		for (i = 0; i < riface->field_count; i++) {
			if (riface->field_resolvers[i] != NULL) {
				AVRO_DEBUG("Freeing field %" PRIsz " %p", i,
					   riface->field_resolvers[i]);
				free_resolver(riface->field_resolvers[i], freeing);
			}
		}
		avro_free(riface->field_resolvers,
			  riface->field_count * sizeof(avro_resolved_reader_t *));
	}

	if (riface->index_mapping != NULL) {
		avro_free(riface->index_mapping,
			  riface->field_count * sizeof(size_t));
	}

	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_record_reader_t, iface);
}
/*
 * Initialize the inline storage of every matched field in a record
 * value.  Returns the first non-zero error from a field initializer.
 */
static int
avro_resolved_record_reader_init(const avro_resolved_reader_t *iface, void *vself)
{
	int rval;
	const avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);
	avro_resolved_record_value_t *self = (avro_resolved_record_value_t *) vself;

	/* Initialize each field */
	size_t i;
	for (i = 0; i < riface->field_count; i++) {
		if (riface->field_resolvers[i] != NULL) {
			check(rval, avro_resolved_reader_init
			      (riface->field_resolvers[i],
			       avro_resolved_record_field(riface, self, i)));
		}
	}

	return 0;
}
/*
 * Finalize the inline storage of every matched field in a record value.
 */
static void
avro_resolved_record_reader_done(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);
	avro_resolved_record_value_t *self = (avro_resolved_record_value_t *) vself;
	size_t fi;

	for (fi = 0; fi < riface->field_count; fi++) {
		avro_resolved_reader_t *resolver = riface->field_resolvers[fi];
		if (resolver == NULL) {
			/* Field skipped during resolution; no storage. */
			continue;
		}
		avro_resolved_reader_done
		    (resolver, avro_resolved_record_field(riface, self, fi));
	}
}
/*
 * Reset the wrapper state of every matched field in a record value.
 * Returns the first non-zero error from a field reset.
 */
static int
avro_resolved_record_reader_reset(const avro_resolved_reader_t *iface, void *vself)
{
	int rval;
	const avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);
	avro_resolved_record_value_t *self = (avro_resolved_record_value_t *) vself;

	/* Reset each field */
	size_t i;
	for (i = 0; i < riface->field_count; i++) {
		if (riface->field_resolvers[i] != NULL) {
			check(rval, avro_resolved_reader_reset_wrappers
			      (riface->field_resolvers[i],
			       avro_resolved_record_field(riface, self, i)));
		}
	}

	return 0;
}
/*
 * A resolved record's size is the READER schema's field count (writer
 * extras are invisible), so the wrapped value isn't consulted.
 */
static int
avro_resolved_record_reader_get_size(const avro_value_iface_t *viface,
				     const void *vself, size_t *size)
{
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);
	AVRO_UNUSED(vself);
	*size = riface->field_count;
	return 0;
}
/*
 * Fetch reader field `index` from the wrapped writer record.  The
 * reader index is translated to the corresponding writer index through
 * index_mapping, and the child value is backed by the field's inline
 * storage within the record value.
 */
static int
avro_resolved_record_reader_get_by_index(const avro_value_iface_t *viface,
					 const void *vself, size_t index,
					 avro_value_t *child, const char **name)
{
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_record_reader_t *riface =
	    container_of(iface, avro_resolved_record_reader_t, parent);
	const avro_resolved_record_value_t *self = (avro_resolved_record_value_t *) vself;

	AVRO_DEBUG("Getting reader field %" PRIsz " from record %p", index, self->wrapped.self);
	if (riface->field_resolvers[index] == NULL) {
		/*
		 * TODO: Return the default value if the writer record
		 * doesn't contain this field.
		 */
		AVRO_DEBUG("Writer doesn't have field");
		avro_set_error("NIY: Default values");
		return EINVAL;
	}

	size_t writer_index = riface->index_mapping[index];
	AVRO_DEBUG("  Writer field is %" PRIsz, writer_index);
	child->iface = &riface->field_resolvers[index]->parent;
	child->self = avro_resolved_record_field(riface, self, index);
	return avro_value_get_by_index(&self->wrapped, writer_index, (avro_value_t *) child->self, name);
}
/*
 * Fetch the reader field named `name`: look up its index in the READER
 * schema and delegate to get_by_index.  If `index` is non-NULL it
 * receives the reader field index.
 */
static int
avro_resolved_record_reader_get_by_name(const avro_value_iface_t *viface,
					const void *vself, const char *name,
					avro_value_t *child, size_t *index)
{
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);

	int ri = avro_schema_record_field_get_index(iface->rschema, name);
	if (ri == -1) {
		avro_set_error("Record doesn't have field named %s", name);
		return EINVAL;
	}

	AVRO_DEBUG("Reader field %s is at index %d", name, ri);
	if (index != NULL) {
		*index = ri;
	}
	return avro_resolved_record_reader_get_by_index(viface, vself, ri, child, NULL);
}
/*
 * Allocate and wire up a new record resolver for the given
 * writer/reader schema pair.  The field arrays are left NULL; the
 * caller (try_record) fills them in after resolving each field.
 * Returns a resolver with refcount 1; caller owns the reference.
 */
static avro_resolved_record_reader_t *
avro_resolved_record_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t *self = (avro_resolved_reader_t *) avro_new(avro_resolved_record_reader_t);
	memset(self, 0, sizeof(avro_resolved_record_reader_t));

	/* Generic avro_value_iface_t entry points shared by all
	 * resolved readers. */
	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_reader_get_type;
	self->parent.get_schema = avro_resolved_reader_get_schema;

	/* Record-specific accessors. */
	self->parent.get_size = avro_resolved_record_reader_get_size;
	self->parent.get_by_index = avro_resolved_record_reader_get_by_index;
	self->parent.get_by_name = avro_resolved_record_reader_get_by_name;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->calculate_size = avro_resolved_record_reader_calculate_size;
	self->free_iface = avro_resolved_record_reader_free_iface;
	self->init = avro_resolved_record_reader_init;
	self->done = avro_resolved_record_reader_done;
	self->reset_wrappers = avro_resolved_record_reader_reset;
	return container_of(self, avro_resolved_record_reader_t, parent);
}
/*
 * Build a resolver for a reader "record" schema.  The writer must also
 * be a record with the same full name, and every reader field must have
 * a compatible counterpart in the writer (default values are not yet
 * supported).  Returns NULL with the Avro error set on any mismatch.
 */
static avro_resolved_reader_t *
try_record(memoize_state_t *state,
	   avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * First verify that the writer is also a record, and has the
	 * same name as the reader.
	 */
	if (!is_avro_record(wschema)) {
		return 0;
	}

	const char *wname = avro_schema_name(wschema);
	const char *rname = avro_schema_name(rschema);
	if (strcmp(wname, rname) != 0) {
		return 0;
	}

	/*
	 * Categorize the fields in the record schemas.  Fields that are
	 * only in the writer are ignored.  Fields that are only in the
	 * reader raise a schema mismatch error, unless the field has a
	 * default value.  Fields that are in both are resolved
	 * recursively.
	 *
	 * The field_resolvers array will contain an avro_value_iface_t
	 * for each field in the reader schema.  To build this array, we
	 * loop through the fields of the reader schema.  If that field
	 * is also in the writer schema, we resolve them recursively,
	 * and store the resolver into the array.  If the field isn't in
	 * the writer schema, we raise an error.  (TODO: Eventually,
	 * we'll handle default values here.)  After this loop finishes,
	 * any NULLs in the field_resolvers array will represent fields
	 * in the writer but not the reader; these fields should be
	 * skipped, and won't be accessible in the resolved reader.
	 */

	/* Memoize before recursing so that recursive record schemas
	 * terminate. */
	avro_resolved_record_reader_t *rself =
	    avro_resolved_record_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, rself);

	size_t rfields = avro_schema_record_size(rschema);

	AVRO_DEBUG("Checking reader record schema %s", wname);

	avro_resolved_reader_t **field_resolvers =
	    (avro_resolved_reader_t **) avro_calloc(rfields, sizeof(avro_resolved_reader_t *));
	size_t *field_offsets = (size_t *) avro_calloc(rfields, sizeof(size_t));
	size_t *index_mapping = (size_t *) avro_calloc(rfields, sizeof(size_t));

	size_t ri;
	for (ri = 0; ri < rfields; ri++) {
		avro_schema_t rfield =
		    avro_schema_record_field_get_by_index(rschema, ri);
		const char *field_name =
		    avro_schema_record_field_name(rschema, ri);

		AVRO_DEBUG("Resolving reader record field %" PRIsz " (%s)", ri, field_name);

		/*
		 * See if this field is also in the writer schema.
		 */
		int wi = avro_schema_record_field_get_index(wschema, field_name);

		if (wi == -1) {
			/*
			 * This field isn't in the writer schema —
			 * that's an error!  TODO: Handle default
			 * values!
			 */
			AVRO_DEBUG("Field %s isn't in writer", field_name);
			avro_set_error("Reader field %s doesn't appear in writer",
				       field_name);
			goto error;
		}

		/*
		 * Try to recursively resolve the schemas for this
		 * field.  If they're not compatible, that's an error.
		 */
		avro_schema_t wfield =
		    avro_schema_record_field_get_by_index(wschema, wi);
		avro_resolved_reader_t *field_resolver =
		    avro_resolved_reader_new_memoized(state, wfield, rfield);

		if (field_resolver == NULL) {
			avro_prefix_error("Field %s isn't compatible: ", field_name);
			goto error;
		}

		/*
		 * Save the details for this field.
		 */
		AVRO_DEBUG("Found match for field %s (%" PRIsz " in reader, %d in writer)",
			   field_name, ri, wi);
		field_resolvers[ri] = field_resolver;
		index_mapping[ri] = wi;
	}

	/*
	 * We might not have found matches for all of the writer fields,
	 * but that's okay — any extras will be ignored.
	 */
	rself->field_count = rfields;
	rself->field_offsets = field_offsets;
	rself->field_resolvers = field_resolvers;
	rself->index_mapping = index_mapping;
	return &rself->parent;

error:
	/*
	 * Clean up any resolver we might have already created.
	 */
	/* The arrays haven't been assigned into rself yet, so its
	 * free_iface won't touch them; release the field resolvers and
	 * the arrays here instead. */
	avro_memoize_delete(&state->mem, wschema, rschema);
	avro_value_iface_decref(&rself->parent.parent);

	{
		unsigned int i;
		for (i = 0; i < rfields; i++) {
			if (field_resolvers[i]) {
				avro_value_iface_decref(&field_resolvers[i]->parent);
			}
		}
	}

	avro_free(field_resolvers, rfields * sizeof(avro_resolved_reader_t *));
	avro_free(field_offsets, rfields * sizeof(size_t));
	avro_free(index_mapping, rfields * sizeof(size_t));
	return NULL;
}
/*-----------------------------------------------------------------------
* writer union
*/
/*
* For writer unions, we maintain a list of resolvers for each branch of
* the union. When we encounter a writer value, we see which branch it
* is, and choose a reader resolver based on that.
*/
/* Resolver interface for a writer union: one child resolver per writer
 * branch, chosen at read time from the writer value's discriminant. */
typedef struct avro_resolved_wunion_reader {
	avro_resolved_reader_t parent;

	/* The number of branches in the writer union */
	size_t branch_count;

	/* A child resolver for each branch of the writer union.  If any
	 * of these are NULL, then we don't have anything on the reader
	 * side that's compatible with that writer branch. */
	avro_resolved_reader_t **branch_resolvers;
} avro_resolved_wunion_reader_t;

/* Per-value state for a resolved writer-union reader. */
typedef struct avro_resolved_wunion_value {
	avro_value_t wrapped;

	/** The currently active branch of the union.  -1 if no branch
	 * is selected. */
	int discriminant;

	/* The rest of the struct is taken up by the inline storage
	 * needed for the active branch. */
} avro_resolved_wunion_value_t;

/** Return a pointer to the active branch within a union struct. */
#define avro_resolved_wunion_branch(_wunion) \
	(((char *) (_wunion)) + sizeof(avro_resolved_wunion_value_t))
/*
 * Compute the instance size of a writer-union resolver: the fixed
 * header plus inline storage big enough for the LARGEST matched branch
 * (only one branch is ever active at a time).
 */
static void
avro_resolved_wunion_reader_calculate_size(avro_resolved_reader_t *iface)
{
	avro_resolved_wunion_reader_t *uiface =
	    container_of(iface, avro_resolved_wunion_reader_t, parent);

	/* Only calculate the size for any resolver once */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));

	size_t i;
	size_t max_branch_size = 0;
	for (i = 0; i < uiface->branch_count; i++) {
		if (uiface->branch_resolvers[i] == NULL) {
			AVRO_DEBUG("No match for writer union branch %" PRIsz, i);
		} else {
			avro_resolved_reader_calculate_size
			    (uiface->branch_resolvers[i]);
			size_t branch_size =
			    uiface->branch_resolvers[i]->instance_size;
			AVRO_DEBUG("Writer branch %" PRIsz " has size %" PRIsz, i, branch_size);
			if (branch_size > max_branch_size) {
				max_branch_size = branch_size;
			}
		}
	}

	AVRO_DEBUG("Maximum branch size is %" PRIsz, max_branch_size);
	iface->instance_size =
	    sizeof(avro_resolved_wunion_value_t) + max_branch_size;
	AVRO_DEBUG("Total union size is %" PRIsz, iface->instance_size);
}
/*
 * Tear down a writer-union resolver interface: free each per-branch
 * child resolver and the resolver array, drop the schema references
 * taken in _create, then free the interface struct itself.
 * `freeing` presumably tracks resolvers already being torn down so
 * mutually recursive resolvers aren't freed twice — verify against
 * free_resolver's definition.
 */
static void
avro_resolved_wunion_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	avro_resolved_wunion_reader_t *uiface =
	    container_of(iface, avro_resolved_wunion_reader_t, parent);
	if (uiface->branch_resolvers != NULL) {
		size_t  i;
		for (i = 0; i < uiface->branch_count; i++) {
			/* Incompatible branches have NULL resolvers. */
			if (uiface->branch_resolvers[i] != NULL) {
				free_resolver(uiface->branch_resolvers[i], freeing);
			}
		}
		avro_free(uiface->branch_resolvers,
			  uiface->branch_count * sizeof(avro_resolved_reader_t *));
	}
	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_wunion_reader_t, iface);
}
/*
 * Initialize a freshly allocated writer-union value: no writer branch
 * has been selected yet, so the inline branch storage is untouched.
 */
static int
avro_resolved_wunion_reader_init(const avro_resolved_reader_t *iface, void *vself)
{
	avro_resolved_wunion_value_t *value =
	    (avro_resolved_wunion_value_t *) vself;
	AVRO_UNUSED(iface);
	value->discriminant = -1;
	return 0;
}
/*
 * Finalize a writer-union value: if a branch is currently active, run
 * that branch resolver's `done` on the inline branch storage and mark
 * the value as having no selected branch.
 */
static void
avro_resolved_wunion_reader_done(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_wunion_reader_t *uiface =
	    container_of(iface, avro_resolved_wunion_reader_t, parent);
	avro_resolved_wunion_value_t *self = (avro_resolved_wunion_value_t *) vself;
	if (self->discriminant >= 0) {
		avro_resolved_reader_done
		    (uiface->branch_resolvers[self->discriminant],
		     avro_resolved_wunion_branch(self));
		self->discriminant = -1;
	}
}
/*
 * Reset a writer-union value for reuse.  The active branch (if any) is
 * deliberately kept selected and only its wrappers are reset, for the
 * common case that the next record uses the same branch again.
 */
static int
avro_resolved_wunion_reader_reset(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_wunion_reader_t *uiface =
	    container_of(iface, avro_resolved_wunion_reader_t, parent);
	avro_resolved_wunion_value_t *self = (avro_resolved_wunion_value_t *) vself;

	/* Keep the same branch selected, for the common case that we're
	 * about to reuse it. */
	if (self->discriminant >= 0) {
		return avro_resolved_reader_reset_wrappers
		    (uiface->branch_resolvers[self->discriminant],
		     avro_resolved_wunion_branch(self));
	}
	return 0;
}
/*
 * Core of the writer-union reader: determine which branch the wrapped
 * writer value currently holds, lazily (re)initialize the inline
 * storage for that branch's resolver, and fill in `real_src` as a
 * value backed by that storage.  Every get/set wrapper below funnels
 * through here.
 *
 * Returns EINVAL if the reader has no resolver for the writer's
 * current branch, or any error from the delegated calls.
 */
static int
avro_resolved_wunion_get_real_src(const avro_value_iface_t *viface,
				  const void *vself, avro_value_t *real_src)
{
	int  rval;
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_wunion_reader_t *uiface =
	    container_of(iface, avro_resolved_wunion_reader_t, parent);
	avro_resolved_wunion_value_t *self = (avro_resolved_wunion_value_t *) vself;
	int  writer_disc;
	check(rval, avro_value_get_discriminant(&self->wrapped, &writer_disc));
	AVRO_DEBUG("Writer is branch %d", writer_disc);

	if (uiface->branch_resolvers[writer_disc] == NULL) {
		avro_set_error("Reader isn't compatible with writer branch %d",
			       writer_disc);
		return EINVAL;
	}

	if (self->discriminant == writer_disc) {
		AVRO_DEBUG("Writer branch %d already selected", writer_disc);
	} else {
		/* Switch branches: finalize the storage for the old
		 * branch before re-initializing it for the new one. */
		if (self->discriminant >= 0) {
			AVRO_DEBUG("Finalizing old writer branch %d", self->discriminant);
			avro_resolved_reader_done
			    (uiface->branch_resolvers[self->discriminant],
			     avro_resolved_wunion_branch(self));
		}
		AVRO_DEBUG("Initializing writer branch %d", writer_disc);
		check(rval, avro_resolved_reader_init
		      (uiface->branch_resolvers[writer_disc],
		       avro_resolved_wunion_branch(self)));
		self->discriminant = writer_disc;
	}

	real_src->iface = &uiface->branch_resolvers[writer_disc]->parent;
	real_src->self = avro_resolved_wunion_branch(self);
	/* The branch storage presumably begins with the child resolver's
	 * wrapped avro_value_t, so this points it at the writer's current
	 * branch value — verify against the resolver instance layouts. */
	return avro_value_get_current_branch(&self->wrapped, (avro_value_t *) real_src->self);
}
/*
 * Accessor wrappers.  Every reader method below follows the same
 * pattern: resolve the writer's currently active union branch into a
 * concrete `src` value (avro_resolved_wunion_get_real_src) and
 * delegate the call to that branch's implementation.
 */
static int
avro_resolved_wunion_reader_get_boolean(const avro_value_iface_t *viface,
					const void *vself, int *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_boolean(&src, out);
}

static int
avro_resolved_wunion_reader_get_bytes(const avro_value_iface_t *viface,
				      const void *vself, const void **buf, size_t *size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_bytes(&src, buf, size);
}

static int
avro_resolved_wunion_reader_grab_bytes(const avro_value_iface_t *viface,
				       const void *vself, avro_wrapped_buffer_t *dest)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_grab_bytes(&src, dest);
}

static int
avro_resolved_wunion_reader_get_double(const avro_value_iface_t *viface,
				       const void *vself, double *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_double(&src, out);
}

static int
avro_resolved_wunion_reader_get_float(const avro_value_iface_t *viface,
				      const void *vself, float *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_float(&src, out);
}

static int
avro_resolved_wunion_reader_get_int(const avro_value_iface_t *viface,
				    const void *vself, int32_t *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_int(&src, out);
}

static int
avro_resolved_wunion_reader_get_long(const avro_value_iface_t *viface,
				     const void *vself, int64_t *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_long(&src, out);
}

static int
avro_resolved_wunion_reader_get_null(const avro_value_iface_t *viface,
				     const void *vself)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_null(&src);
}

static int
avro_resolved_wunion_reader_get_string(const avro_value_iface_t *viface,
				       const void *vself, const char **str, size_t *size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_string(&src, str, size);
}

static int
avro_resolved_wunion_reader_grab_string(const avro_value_iface_t *viface,
					const void *vself, avro_wrapped_buffer_t *dest)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_grab_string(&src, dest);
}

static int
avro_resolved_wunion_reader_get_enum(const avro_value_iface_t *viface,
				     const void *vself, int *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_enum(&src, out);
}

static int
avro_resolved_wunion_reader_get_fixed(const avro_value_iface_t *viface,
				      const void *vself, const void **buf, size_t *size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_fixed(&src, buf, size);
}

static int
avro_resolved_wunion_reader_grab_fixed(const avro_value_iface_t *viface,
				       const void *vself, avro_wrapped_buffer_t *dest)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_grab_fixed(&src, dest);
}
/*
 * Mutator wrappers: same delegation pattern as the accessors — resolve
 * the writer's active union branch, then forward the set/give call to
 * that branch's implementation.
 */
static int
avro_resolved_wunion_reader_set_boolean(const avro_value_iface_t *viface,
					void *vself, int val)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_boolean(&src, val);
}

static int
avro_resolved_wunion_reader_set_bytes(const avro_value_iface_t *viface,
				      void *vself, void *buf, size_t size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_bytes(&src, buf, size);
}

static int
avro_resolved_wunion_reader_give_bytes(const avro_value_iface_t *viface,
				       void *vself, avro_wrapped_buffer_t *buf)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_give_bytes(&src, buf);
}

static int
avro_resolved_wunion_reader_set_double(const avro_value_iface_t *viface,
				       void *vself, double val)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_double(&src, val);
}

static int
avro_resolved_wunion_reader_set_float(const avro_value_iface_t *viface,
				      void *vself, float val)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_float(&src, val);
}

static int
avro_resolved_wunion_reader_set_int(const avro_value_iface_t *viface,
				    void *vself, int32_t val)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_int(&src, val);
}

static int
avro_resolved_wunion_reader_set_long(const avro_value_iface_t *viface,
				     void *vself, int64_t val)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_long(&src, val);
}

static int
avro_resolved_wunion_reader_set_null(const avro_value_iface_t *viface,
				     void *vself)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_null(&src);
}

static int
avro_resolved_wunion_reader_set_string(const avro_value_iface_t *viface,
				       void *vself, const char *str)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_string(&src, str);
}

static int
avro_resolved_wunion_reader_set_string_len(const avro_value_iface_t *viface,
					   void *vself, const char *str, size_t size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_string_len(&src, str, size);
}

static int
avro_resolved_wunion_reader_give_string_len(const avro_value_iface_t *viface,
					    void *vself, avro_wrapped_buffer_t *buf)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_give_string_len(&src, buf);
}

static int
avro_resolved_wunion_reader_set_enum(const avro_value_iface_t *viface,
				     void *vself, int val)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_enum(&src, val);
}

static int
avro_resolved_wunion_reader_set_fixed(const avro_value_iface_t *viface,
				      void *vself, void *buf, size_t size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_fixed(&src, buf, size);
}

static int
avro_resolved_wunion_reader_give_fixed(const avro_value_iface_t *viface,
				       void *vself, avro_wrapped_buffer_t *dest)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_give_fixed(&src, dest);
}
/*
 * Compound-value wrappers (arrays, maps, records, unions): resolve the
 * writer's active union branch, then forward the size/index/name/branch
 * operation to that branch's implementation.
 */
static int
avro_resolved_wunion_reader_get_size(const avro_value_iface_t *viface,
				     const void *vself, size_t *size)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_size(&src, size);
}

static int
avro_resolved_wunion_reader_get_by_index(const avro_value_iface_t *viface,
					 const void *vself, size_t index,
					 avro_value_t *child, const char **name)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_by_index(&src, index, child, name);
}

static int
avro_resolved_wunion_reader_get_by_name(const avro_value_iface_t *viface,
					const void *vself, const char *name,
					avro_value_t *child, size_t *index)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_by_name(&src, name, child, index);
}

static int
avro_resolved_wunion_reader_get_discriminant(const avro_value_iface_t *viface,
					     const void *vself, int *out)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_discriminant(&src, out);
}

static int
avro_resolved_wunion_reader_get_current_branch(const avro_value_iface_t *viface,
					       const void *vself, avro_value_t *branch)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_get_current_branch(&src, branch);
}

static int
avro_resolved_wunion_reader_append(const avro_value_iface_t *viface,
				   void *vself, avro_value_t *child_out,
				   size_t *new_index)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_append(&src, child_out, new_index);
}

static int
avro_resolved_wunion_reader_add(const avro_value_iface_t *viface,
				void *vself, const char *key,
				avro_value_t *child, size_t *index, int *is_new)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_add(&src, key, child, index, is_new);
}

static int
avro_resolved_wunion_reader_set_branch(const avro_value_iface_t *viface,
				       void *vself, int discriminant,
				       avro_value_t *branch)
{
	int  rval;
	avro_value_t  src;
	check(rval, avro_resolved_wunion_get_real_src(viface, vself, &src));
	return avro_value_set_branch(&src, discriminant, branch);
}
/*
 * Allocate and initialize a writer-union reader resolver, wiring every
 * avro_value method to the branch-delegating wrappers.  The caller owns
 * the single reference and must fill in branch_count/branch_resolvers
 * before use.
 *
 * Returns NULL if allocation fails.  (The original code passed the
 * unchecked avro_new() result straight to memset, which is undefined
 * behavior on an out-of-memory allocator.)
 */
static avro_resolved_wunion_reader_t *
avro_resolved_wunion_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t *self = (avro_resolved_reader_t *) avro_new(avro_resolved_wunion_reader_t);
	if (self == NULL) {
		avro_set_error("Cannot allocate resolved writer-union reader");
		return NULL;
	}
	memset(self, 0, sizeof(avro_resolved_wunion_reader_t));

	/* Generic resolver plumbing. */
	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_reader_get_type;
	self->parent.get_schema = avro_resolved_reader_get_schema;

	/* Every value operation delegates to the writer's active branch. */
	self->parent.get_boolean = avro_resolved_wunion_reader_get_boolean;
	self->parent.grab_bytes = avro_resolved_wunion_reader_grab_bytes;
	self->parent.get_bytes = avro_resolved_wunion_reader_get_bytes;
	self->parent.get_double = avro_resolved_wunion_reader_get_double;
	self->parent.get_float = avro_resolved_wunion_reader_get_float;
	self->parent.get_int = avro_resolved_wunion_reader_get_int;
	self->parent.get_long = avro_resolved_wunion_reader_get_long;
	self->parent.get_null = avro_resolved_wunion_reader_get_null;
	self->parent.get_string = avro_resolved_wunion_reader_get_string;
	self->parent.grab_string = avro_resolved_wunion_reader_grab_string;
	self->parent.get_enum = avro_resolved_wunion_reader_get_enum;
	self->parent.get_fixed = avro_resolved_wunion_reader_get_fixed;
	self->parent.grab_fixed = avro_resolved_wunion_reader_grab_fixed;
	self->parent.set_boolean = avro_resolved_wunion_reader_set_boolean;
	self->parent.set_bytes = avro_resolved_wunion_reader_set_bytes;
	self->parent.give_bytes = avro_resolved_wunion_reader_give_bytes;
	self->parent.set_double = avro_resolved_wunion_reader_set_double;
	self->parent.set_float = avro_resolved_wunion_reader_set_float;
	self->parent.set_int = avro_resolved_wunion_reader_set_int;
	self->parent.set_long = avro_resolved_wunion_reader_set_long;
	self->parent.set_null = avro_resolved_wunion_reader_set_null;
	self->parent.set_string = avro_resolved_wunion_reader_set_string;
	self->parent.set_string_len = avro_resolved_wunion_reader_set_string_len;
	self->parent.give_string_len = avro_resolved_wunion_reader_give_string_len;
	self->parent.set_enum = avro_resolved_wunion_reader_set_enum;
	self->parent.set_fixed = avro_resolved_wunion_reader_set_fixed;
	self->parent.give_fixed = avro_resolved_wunion_reader_give_fixed;
	self->parent.get_size = avro_resolved_wunion_reader_get_size;
	self->parent.get_by_index = avro_resolved_wunion_reader_get_by_index;
	self->parent.get_by_name = avro_resolved_wunion_reader_get_by_name;
	self->parent.get_discriminant = avro_resolved_wunion_reader_get_discriminant;
	self->parent.get_current_branch = avro_resolved_wunion_reader_get_current_branch;
	self->parent.append = avro_resolved_wunion_reader_append;
	self->parent.add = avro_resolved_wunion_reader_add;
	self->parent.set_branch = avro_resolved_wunion_reader_set_branch;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->calculate_size = avro_resolved_wunion_reader_calculate_size;
	self->free_iface = avro_resolved_wunion_reader_free_iface;
	self->init = avro_resolved_wunion_reader_init;
	self->done = avro_resolved_wunion_reader_done;
	self->reset_wrappers = avro_resolved_wunion_reader_reset;
	return container_of(self, avro_resolved_wunion_reader_t, parent);
}
/*
 * Build a resolver for a writer union: resolve each writer branch
 * against the reader schema, keeping a (possibly NULL) child resolver
 * per branch.  Succeeds if at least one branch is compatible.  The
 * resolver is memoized *before* recursing so self-referential schemas
 * terminate.
 */
static avro_resolved_reader_t *
try_writer_union(memoize_state_t *state,
		 avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * For a writer union, we check each branch of the union in turn
	 * against the reader schema.  For each one that is compatible,
	 * we save the child resolver that can be used to process a
	 * writer value of that branch.
	 */

	size_t  branch_count = avro_schema_union_size(wschema);
	AVRO_DEBUG("Checking %" PRIsz "-branch writer union schema", branch_count);

	avro_resolved_wunion_reader_t  *uself =
	    avro_resolved_wunion_reader_create(wschema, rschema);
	avro_memoize_set(&state->mem, wschema, rschema, uself);

	/* NOTE(review): avro_calloc's result is not checked here before
	 * the per-branch stores below — verify the project's allocator
	 * policy (abort vs. NULL on OOM). */
	avro_resolved_reader_t  **branch_resolvers =
	    (avro_resolved_reader_t **) avro_calloc(branch_count, sizeof(avro_resolved_reader_t *));
	int  some_branch_compatible = 0;

	size_t  i;
	for (i = 0; i < branch_count; i++) {
		avro_schema_t  branch_schema =
		    avro_schema_union_branch(wschema, i);

		AVRO_DEBUG("Resolving writer union branch %" PRIsz " (%s)", i,
			   avro_schema_type_name(branch_schema));

		/*
		 * Try to recursively resolve this branch of the writer
		 * union against the reader schema.  Don't raise
		 * an error if this fails — we just need one of
		 * the writer branches to be compatible.
		 */

		branch_resolvers[i] =
		    avro_resolved_reader_new_memoized(state, branch_schema, rschema);
		if (branch_resolvers[i] == NULL) {
			AVRO_DEBUG("No match for writer union branch %" PRIsz, i);
		} else {
			AVRO_DEBUG("Found match for writer union branch %" PRIsz, i);
			some_branch_compatible = 1;
		}
	}

	/*
	 * If we didn't find a match, that's an error.
	 */

	if (!some_branch_compatible) {
		AVRO_DEBUG("No writer union branches match");
		avro_set_error("No branches in the writer are compatible "
			       "with reader schema %s",
			       avro_schema_type_name(rschema));
		goto error;
	}

	uself->branch_count = branch_count;
	uself->branch_resolvers = branch_resolvers;
	return &uself->parent;

error:
	/*
	 * Clean up any resolver we might have already created.
	 */

	avro_memoize_delete(&state->mem, wschema, rschema);
	avro_value_iface_decref(&uself->parent.parent);

	{
		/* Shadows the outer `i`; scoped to this cleanup block. */
		unsigned int  i;
		for (i = 0; i < branch_count; i++) {
			if (branch_resolvers[i]) {
				avro_value_iface_decref(&branch_resolvers[i]->parent);
			}
		}
	}

	avro_free(branch_resolvers, branch_count * sizeof(avro_resolved_reader_t *));
	return NULL;
}
/*-----------------------------------------------------------------------
* reader union
*/
/*
* For reader unions, we only resolve them against writers which aren't
* unions. (We'll have already broken any writer union apart into its
* separate branches.) We just have to record which branch of the
* reader union the writer schema is compatible with.
*/
/*
 * Resolver interface for a reader union.  Resolution picks a single
 * compatible reader branch at schema-resolution time, so an instance
 * needs only that one child resolver and the branch index.
 */
typedef struct avro_resolved_runion_reader {
	avro_resolved_reader_t  parent;

	/* The reader union branch that's compatible with the writer
	 * schema. */
	size_t  active_branch;

	/* A child resolver for the reader branch. */
	avro_resolved_reader_t  *branch_resolver;
} avro_resolved_runion_reader_t;
/*
 * A reader-union instance is exactly its resolved branch's instance,
 * so the size is simply inherited from the branch resolver.
 */
static void
avro_resolved_runion_reader_calculate_size(avro_resolved_reader_t *iface)
{
	avro_resolved_runion_reader_t *runion =
	    container_of(iface, avro_resolved_runion_reader_t, parent);

	/* Run at most once per resolver. */
	iface->calculate_size = NULL;

	AVRO_DEBUG("Calculating size for %s->%s",
		   avro_schema_type_name((iface)->wschema),
		   avro_schema_type_name((iface)->rschema));

	avro_resolved_reader_calculate_size(runion->branch_resolver);
	iface->instance_size = runion->branch_resolver->instance_size;
}
/*
 * Release a reader-union resolver: free the single branch resolver (if
 * one was attached), drop the schema references, and free the struct.
 */
static void
avro_resolved_runion_reader_free_iface(avro_resolved_reader_t *iface, st_table *freeing)
{
	avro_resolved_runion_reader_t *runion =
	    container_of(iface, avro_resolved_runion_reader_t, parent);
	avro_resolved_reader_t *branch = runion->branch_resolver;

	if (branch != NULL)
		free_resolver(branch, freeing);

	avro_schema_decref(iface->wschema);
	avro_schema_decref(iface->rschema);
	avro_freet(avro_resolved_runion_reader_t, iface);
}
/* Initialization is delegated wholesale to the resolved branch. */
static int
avro_resolved_runion_reader_init(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_runion_reader_t *runion =
	    container_of(iface, avro_resolved_runion_reader_t, parent);
	return avro_resolved_reader_init(runion->branch_resolver, vself);
}
/* Finalization is delegated wholesale to the resolved branch. */
static void
avro_resolved_runion_reader_done(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_runion_reader_t *runion =
	    container_of(iface, avro_resolved_runion_reader_t, parent);
	avro_resolved_reader_done(runion->branch_resolver, vself);
}
/* Wrapper reset is delegated wholesale to the resolved branch. */
static int
avro_resolved_runion_reader_reset(const avro_resolved_reader_t *iface, void *vself)
{
	const avro_resolved_runion_reader_t *runion =
	    container_of(iface, avro_resolved_runion_reader_t, parent);
	return avro_resolved_reader_reset_wrappers(runion->branch_resolver, vself);
}
/*
 * Report which reader-union branch is active.  For a resolved reader
 * union this is fixed at schema-resolution time, so the value itself
 * is unused.
 */
static int
avro_resolved_runion_reader_get_discriminant(const avro_value_iface_t *viface,
					     const void *vself, int *out)
{
	AVRO_UNUSED(vself);
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_runion_reader_t *uiface =
	    container_of(iface, avro_resolved_runion_reader_t, parent);
	AVRO_DEBUG("Reader union is branch %" PRIsz, uiface->active_branch);
	/* size_t -> int narrowing; fine while unions have fewer than
	 * INT_MAX branches. */
	*out = uiface->active_branch;
	return 0;
}
/*
 * Hand back the active branch as a value.  The branch value shares the
 * union's own storage (same `self` pointer), since a reader-union
 * instance *is* its branch instance.
 */
static int
avro_resolved_runion_reader_get_current_branch(const avro_value_iface_t *viface,
					       const void *vself, avro_value_t *branch)
{
	const avro_resolved_reader_t *iface =
	    container_of(viface, avro_resolved_reader_t, parent);
	const avro_resolved_runion_reader_t *uiface =
	    container_of(iface, avro_resolved_runion_reader_t, parent);
	AVRO_DEBUG("Getting reader branch %" PRIsz " for union %p", uiface->active_branch, vself);
	branch->iface = &uiface->branch_resolver->parent;
	/* Casts away const: avro_value_t carries a mutable self pointer. */
	branch->self = (void *) vself;
	return 0;
}
/*
 * Allocate and initialize a reader-union resolver.  Only union-level
 * operations (discriminant, current branch) are overridden; everything
 * else is handled by the branch resolver via get_current_branch.
 *
 * Returns NULL if allocation fails.  (The original code passed the
 * unchecked avro_new() result straight to memset, which is undefined
 * behavior on an out-of-memory allocator.)
 */
static avro_resolved_runion_reader_t *
avro_resolved_runion_reader_create(avro_schema_t wschema, avro_schema_t rschema)
{
	avro_resolved_reader_t *self = (avro_resolved_reader_t *) avro_new(avro_resolved_runion_reader_t);
	if (self == NULL) {
		avro_set_error("Cannot allocate resolved reader-union reader");
		return NULL;
	}
	memset(self, 0, sizeof(avro_resolved_runion_reader_t));

	self->parent.incref_iface = avro_resolved_reader_incref_iface;
	self->parent.decref_iface = avro_resolved_reader_decref_iface;
	self->parent.incref = avro_resolved_reader_incref;
	self->parent.decref = avro_resolved_reader_decref;
	self->parent.reset = avro_resolved_reader_reset;
	self->parent.get_type = avro_resolved_reader_get_type;
	self->parent.get_schema = avro_resolved_reader_get_schema;
	self->parent.get_discriminant = avro_resolved_runion_reader_get_discriminant;
	self->parent.get_current_branch = avro_resolved_runion_reader_get_current_branch;

	self->refcount = 1;
	self->wschema = avro_schema_incref(wschema);
	self->rschema = avro_schema_incref(rschema);
	self->calculate_size = avro_resolved_runion_reader_calculate_size;
	self->free_iface = avro_resolved_runion_reader_free_iface;
	self->init = avro_resolved_runion_reader_init;
	self->done = avro_resolved_runion_reader_done;
	self->reset_wrappers = avro_resolved_runion_reader_reset;
	return container_of(self, avro_resolved_runion_reader_t, parent);
}
/*
 * Build a resolver for a reader union.  The writer is never a union
 * here (writer unions were already split into their branches), so we
 * just take the first reader branch the writer is compatible with.
 */
static avro_resolved_reader_t *
try_reader_union(memoize_state_t *state,
		 avro_schema_t wschema, avro_schema_t rschema)
{
	size_t branch_count = avro_schema_union_size(rschema);
	AVRO_DEBUG("Checking %" PRIsz "-branch reader union schema", branch_count);

	avro_resolved_runion_reader_t *uself =
	    avro_resolved_runion_reader_create(wschema, rschema);
	/* Memoize before recursing so self-referential schemas terminate. */
	avro_memoize_set(&state->mem, wschema, rschema, uself);

	size_t bi;
	for (bi = 0; bi < branch_count; bi++) {
		avro_schema_t branch_schema = avro_schema_union_branch(rschema, bi);

		AVRO_DEBUG("Resolving reader union branch %" PRIsz " (%s)", bi,
			   avro_schema_type_name(branch_schema));

		/* A failed match here isn't an error; any one compatible
		 * reader branch is enough. */
		uself->branch_resolver =
		    avro_resolved_reader_new_memoized(state, wschema, branch_schema);
		if (uself->branch_resolver != NULL) {
			AVRO_DEBUG("Found match for reader union branch %" PRIsz, bi);
			uself->active_branch = bi;
			return &uself->parent;
		}
		AVRO_DEBUG("No match for reader union branch %" PRIsz, bi);
	}

	/* No branch matched: undo the memoization and release the
	 * half-built resolver. */
	AVRO_DEBUG("No reader union branches match");
	avro_set_error("No branches in the reader are compatible "
		       "with writer schema %s",
		       avro_schema_type_name(wschema));
	avro_memoize_delete(&state->mem, wschema, rschema);
	avro_value_iface_decref(&uself->parent.parent);
	return NULL;
}
/*-----------------------------------------------------------------------
* Schema type dispatcher
*/
/*
 * Central resolution dispatcher: produce (or reuse a memoized) resolver
 * matching `wschema` to `rschema`.  Writer unions and links take
 * precedence; otherwise dispatch on the reader schema's type.  Returns
 * NULL (with avro_set_error set) if the schemas are incompatible.
 */
static avro_resolved_reader_t *
avro_resolved_reader_new_memoized(memoize_state_t *state,
				  avro_schema_t wschema, avro_schema_t rschema)
{
	check_param(NULL, is_avro_schema(wschema), "writer schema");
	check_param(NULL, is_avro_schema(rschema), "reader schema");

	/*
	 * First see if we've already matched these two schemas.  If so,
	 * just return that resolver.
	 */

	avro_resolved_reader_t  *saved = NULL;
	if (avro_memoize_get(&state->mem, wschema, rschema, (void **) &saved)) {
		AVRO_DEBUG("Already resolved %s%s%s->%s%s%s",
			   is_avro_link(wschema)? "[": "",
			   avro_schema_type_name(wschema),
			   is_avro_link(wschema)? "]": "",
			   is_avro_link(rschema)? "[": "",
			   avro_schema_type_name(rschema),
			   is_avro_link(rschema)? "]": "");
		return saved;
	} else {
		AVRO_DEBUG("Resolving %s%s%s->%s%s%s",
			   is_avro_link(wschema)? "[": "",
			   avro_schema_type_name(wschema),
			   is_avro_link(wschema)? "]": "",
			   is_avro_link(rschema)? "[": "",
			   avro_schema_type_name(rschema),
			   is_avro_link(rschema)? "]": "");
	}

	/*
	 * Otherwise we have some work to do.  First check if the writer
	 * schema is a union.  If so, break it apart.
	 */

	if (is_avro_union(wschema)) {
		return try_writer_union(state, wschema, rschema);
	}

	else if (is_avro_link(wschema)) {
		return try_wlink(state, wschema, rschema);
	}

	/*
	 * If the writer isn't a union, then choose a resolver based on
	 * the reader schema.
	 */

	switch (avro_typeof(rschema))
	{
		case AVRO_BOOLEAN:
			return try_boolean(state, wschema, rschema);

		case AVRO_BYTES:
			return try_bytes(state, wschema, rschema);

		case AVRO_DOUBLE:
			return try_double(state, wschema, rschema);

		case AVRO_FLOAT:
			return try_float(state, wschema, rschema);

		case AVRO_INT32:
			return try_int(state, wschema, rschema);

		case AVRO_INT64:
			return try_long(state, wschema, rschema);

		case AVRO_NULL:
			return try_null(state, wschema, rschema);

		case AVRO_STRING:
			return try_string(state, wschema, rschema);

		case AVRO_ARRAY:
			return try_array(state, wschema, rschema);

		case AVRO_ENUM:
			return try_enum(state, wschema, rschema);

		case AVRO_FIXED:
			return try_fixed(state, wschema, rschema);

		case AVRO_MAP:
			return try_map(state, wschema, rschema);

		case AVRO_RECORD:
			return try_record(state, wschema, rschema);

		case AVRO_UNION:
			return try_reader_union(state, wschema, rschema);

		case AVRO_LINK:
			return try_rlink(state, wschema, rschema);

		default:
			avro_set_error("Unknown reader schema type");
			return NULL;
	}

	/* Unreachable: every switch case above returns. */
	return NULL;
}
/*
 * Public entry point: build a value interface that reads data written
 * with `wschema` and presents it as `rschema`.  Returns NULL (with
 * avro_set_error set) if the schemas can't be resolved.  The caller
 * owns the returned interface reference.
 */
avro_value_iface_t *
avro_resolved_reader_new(avro_schema_t wschema, avro_schema_t rschema)
{
	/*
	 * Create a state to keep track of the value implementations
	 * that we create for each subschema.
	 */

	memoize_state_t  state;
	avro_memoize_init(&state.mem);
	state.links = NULL;

	/*
	 * Create the value implementations.
	 */

	avro_resolved_reader_t  *result =
	    avro_resolved_reader_new_memoized(&state, wschema, rschema);
	if (result == NULL) {
		avro_memoize_done(&state.mem);
		return NULL;
	}

	/*
	 * Fix up any link schemas so that their value implementations
	 * point to their target schemas' implementations.
	 */

	avro_resolved_reader_calculate_size(result);
	while (state.links != NULL) {
		avro_resolved_link_reader_t  *liface = state.links;
		avro_resolved_reader_calculate_size(liface->target_resolver);
		/* Pop the link off the pending list as we fix it up. */
		state.links = liface->next;
		liface->next = NULL;
	}

	/*
	 * And now we can return.
	 */

	avro_memoize_done(&state.mem);
	return &result->parent;
}
| jlawton/ObjectiveAvro | Avro-C/src/resolved-reader.c | C | mit | 107,543 |
/*
* main.c
*
* Created on: 31 May 2016
* Author: ajuaristi <a@juaristi.eus>
*/
#include <stdio.h>
#include <signal.h>
#include <getopt.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/videodev2.h>
#include <sys/stat.h>
#include "utils.h"
#include "appbase.h"
#include "uvc.h"
#define DEFAULT_WAIT_TIME 5

/*
 * Global stop flag, written from the signal handler and polled by the
 * capture/stream loops.  `volatile sig_atomic_t` is the only object
 * type the C standard guarantees may be safely written from an
 * asynchronous signal handler and observed elsewhere (C11 7.14.1.1);
 * a plain `int` here was a data race / undefined behavior.
 */
volatile sig_atomic_t stop;
#define SHOULD_STOP(v) (stop = v)
#define IS_STOPPED() (stop)
/*
 * Build a heap-allocated debug filename of the form "picture.N", with N
 * cycling through 0..COUNT_LIMIT so old debug frames get overwritten.
 * The caller frees the returned string.
 */
static char *create_debug_filename(void)
{
#define COUNT_LIMIT 9
	static unsigned int count = 0;
	char prefix[] = "picture";
	/* Room for the prefix plus ".N" (N is a single digit <= 9). */
	size_t size = sizeof(prefix) + 2;
	char *filename = ec_malloc(size);

	snprintf(filename, size, "%s.%d", prefix, count);
	count = (count >= COUNT_LIMIT) ? 0 : count + 1;

	return filename;
#undef COUNT_LIMIT
}
/*
 * Dump a captured frame to a rotating debug file on disk.  Frames are
 * raw YUYV or JPEG bytes, so the file must be opened in binary mode
 * ("wb"): the original "w" would corrupt the data on platforms that
 * translate line endings.  Write and close errors are now reported
 * instead of silently ignored.
 */
static void write_to_disk(const unsigned char *data, size_t size)
{
	FILE *f;
	char *filename = create_debug_filename();

	f = fopen(filename, "wb");
	if (f) {
		if (size == 0 || fwrite(data, size, 1, f) == 1)
			fprintf(stderr, "DEBUG: Frame written to file '%s'\n", filename);
		else
			fprintf(stderr, "ERROR: Short write to file '%s'\n", filename);
		/* fclose flushes buffered data; a failure here means the
		 * frame may not have reached the disk. */
		if (fclose(f) != 0)
			fprintf(stderr, "ERROR: Could not close file '%s'\n", filename);
	}

	free(filename);
}
/*
 * Print command-line usage (when a program name is available) and
 * terminate with a failure status.  Never returns.
 */
static void print_usage_and_exit(const char *name)
{
	if (name != NULL)
		printf("Usage: %s [OPTIONS] <app name> <username> <password>\n"
		       "Options:\n"
		       " -w secs Sleep this amount of seconds between shots\n"
		       " -d Display debug messages\n"
		       " -s Take one single shot and exit\n"
		       " -j Convert frames to JPEG\n"
		       " -S Stream as fast as possible\n",
		       name);
	exit(1);
}
/*
 * Signal handler: name the signal for the log, then raise the global
 * stop flag so the capture/stream loops exit cleanly.
 *
 * NOTE(review): fprintf is not async-signal-safe (POSIX lists only
 * write() and friends as safe in handlers) — consider moving the
 * logging out of the handler.  The messages also lack a trailing
 * newline.
 */
static void sighandler(int s)
{
	char *signame;

	switch (s) {
	case SIGINT:
		signame = "SIGINT";
		break;
	case SIGQUIT:
		signame = "SIGQUIT";
		break;
	case SIGTERM:
		signame = "SIGTERM";
		break;
	case SIGABRT:
		signame = "SIGABRT";
		break;
	case SIGTRAP:
		signame = "SIGTRAP";
		break;
	default:
		/* Not one of the signals we install for; log the number. */
		signame = NULL;
		break;
	}

	if (signame)
		fprintf(stderr, "Received %s. Exiting.", signame);
	else
		fprintf(stderr, "Received %d. Exiting.", s);

	/* Stop! */
	SHOULD_STOP(1);
}
/*
 * Stream frames to Appbase as fast as the camera delivers them, until a
 * signal raises the stop flag, a capture fails, or a push fails.
 * Frames are captured at 320x240 YUYV and optionally converted to JPEG
 * before upload.  `fatal` presumably terminates the process, so the
 * camera handle is not explicitly released on those paths — verify.
 */
static void do_stream(struct appbase *ab, bool jpeg)
{
	struct camera *c;

	c = uvc_open();
	if (!c)
		fatal("Could not find any camera for capturing pictures");

	c->frame = uvc_alloc_frame(320, 240, V4L2_PIX_FMT_YUYV);
	if (!c->frame)
		fatal("Could not allocate enough memory for frames");
	if (!uvc_init(c))
		fatal("Could not start camera for streaming");

	while (!IS_STOPPED() && uvc_capture_frame(c)) {
		if (jpeg)
			frame_convert_yuyv_to_jpeg(c->frame);
		if (!appbase_push_frame(ab,
				c->frame->frame_data, c->frame->frame_bytes_used,
				&c->frame->capture_time)) {
			fprintf(stderr, "ERROR: Could not capture frame\n");
			break;
		}
		/* Mark the buffer consumed so the next capture starts clean. */
		c->frame->frame_bytes_used = 0;
	}

	uvc_close(c);
}
/*
 * Periodic capture loop: every `wait_time` seconds open the camera,
 * grab one 320x240 YUYV frame, optionally convert it to JPEG, push it
 * to Appbase (and, with `debug`, also dump it to disk), then close the
 * camera again.  With `oneshot`, take a single picture and return.
 * Re-opening the device each round keeps the camera free between shots.
 */
void do_capture(struct appbase *ab, unsigned int wait_time, bool oneshot, bool jpeg, bool debug)
{
	struct camera *c;
	struct frame *f;

	while (!IS_STOPPED()) {
		c = uvc_open();
		if (!c)
			fatal("Could not find any camera for capturing pictures");

		c->frame = uvc_alloc_frame(320, 240, V4L2_PIX_FMT_YUYV);
		if (!c->frame)
			fatal("Could not allocate enough memory for frames");
		if (!uvc_init(c))
			fatal("Could not start camera for streaming");

		if (uvc_capture_frame(c)) {
			f = c->frame;
			if (jpeg)
				frame_convert_yuyv_to_jpeg(f);
			if (!appbase_push_frame(ab,
					f->frame_data, f->frame_bytes_used,
					&f->capture_time))
				fprintf(stderr, "ERROR: Could not send frame\n");
			if (debug)
				write_to_disk(f->frame_data, f->frame_bytes_used);
			/* Scrub the buffer so a stale frame can't be resent. */
			memset(f->frame_data, 0, f->frame_size);
			f->frame_bytes_used = 0;
		} else {
			fprintf(stderr, "ERROR: Could not capture frame\n");
		}

		uvc_close(c);

		if (oneshot)
			break;

		/*
		 * sleep(3) should not interfere with our signal handlers,
		 * unless we're also handling SIGALRM
		 */
		sleep(wait_time);
	}
}
/*
 * Entry point: parse options, install signal handlers, log into
 * Appbase with the three positional arguments (app name, username,
 * password), and run either the streaming loop (-S) or the periodic
 * capture loop.
 */
int main(int argc, char **argv)
{
	int opt;
	char *endptr;
	long int wait_time = DEFAULT_WAIT_TIME;
	bool debug = false, oneshot = false, stream = false, jpeg = false;
	struct sigaction sig;
	struct appbase *ab;

	/* Parse command-line options */
	while ((opt = getopt(argc, argv, "w:dsSj")) != -1) {
		switch (opt) {
		case 'w':
			/* Reject trailing garbage and negative waits. */
			wait_time = strtol(optarg, &endptr, 10);
			if (*endptr || wait_time < 0)
				print_usage_and_exit(argv[0]);
			break;
		case 'd':
			debug = true;
			break;
		case 's':
			oneshot = true;
			break;
		case 'S':
			stream = true;
			break;
		case 'j':
			jpeg = true;
			break;
		default:
			print_usage_and_exit(argv[0]);
			break;
		}
	}

	/* Set signal handlers and set stop condition to zero */
	SHOULD_STOP(0);
	memset(&sig, 0, sizeof(sig));
	sig.sa_handler = sighandler;
	/* SA_RESETHAND: a second signal falls back to the default action
	 * (kill), so a stuck loop can still be interrupted. */
	sig.sa_flags = SA_RESETHAND;
	sigemptyset(&sig.sa_mask);
	sigaction(SIGINT, &sig, NULL);
	sigaction(SIGQUIT, &sig, NULL);
	sigaction(SIGTERM, &sig, NULL);
	sigaction(SIGABRT, &sig, NULL);
	sigaction(SIGTRAP, &sig, NULL);

	/* Set up Appbase handle
	 * We need the app name, username and password to build the REST URL, and these
	 * should come now as parameters. We expect optind to point us to the first one.
	 */
	if (argc - optind < 3)
		print_usage_and_exit(argv[0]);

	ab = appbase_open(
			argv[optind],		// app name
			argv[optind + 1],	// username
			argv[optind + 2],	// password
			false);			// streaming off
	if (!ab)
		fatal("Could not log into Appbase");

	if (debug) {
		appbase_enable_progress(ab, true);
		appbase_enable_verbose(ab, true);
	}

	if (stream)
		do_stream(ab, jpeg);
	else
		do_capture(ab, wait_time, oneshot, jpeg, debug);

	appbase_close(ab);

	return 0;
}
| juaristi/appbase-cctv | daemon-main.c | C | mit | 5,533 |
6650 #include "types.h"
6651 #include "x86.h"
6652
// xv6 freestanding string/memory helpers -- pretty-printed book listing;
// the leading numbers are print line numbers and are part of the listing.
// memset: fill n bytes with c; uses 4-byte stosl when dst and n are
// word-aligned, byte-wise stosb otherwise.
6653 void*
6654 memset(void *dst, int c, uint n)
6655 {
6656 if ((int)dst%4 == 0 && n%4 == 0){
6657 c &= 0xFF;
6658 stosl(dst, (c<<24)|(c<<16)|(c<<8)|c, n/4);
6659 } else
6660 stosb(dst, c, n);
6661 return dst;
6662 }
6663
// memcmp: byte-wise compare; returns the difference of the first
// differing byte pair, 0 if the regions match.
6664 int
6665 memcmp(const void *v1, const void *v2, uint n)
6666 {
6667 const uchar *s1, *s2;
6668
6669 s1 = v1;
6670 s2 = v2;
6671 while(n-- > 0){
6672 if(*s1 != *s2)
6673 return *s1 - *s2;
6674 s1++, s2++;
6675 }
6676
6677 return 0;
6678 }
6679
// memmove: overlap-safe copy -- copies backwards when src precedes dst
// inside the overlap, forwards otherwise.
6680 void*
6681 memmove(void *dst, const void *src, uint n)
6682 {
6683 const char *s;
6684 char *d;
6685
6686 s = src;
6687 d = dst;
6688 if(s < d && s + n > d){
6689 s += n;
6690 d += n;
6691 while(n-- > 0)
6692 *--d = *--s;
6693 } else
6694 while(n-- > 0)
6695 *d++ = *s++;
6696
6697 return dst;
6698 }
6699
6700 // memcpy exists to placate GCC. Use memmove.
6701 void*
6702 memcpy(void *dst, const void *src, uint n)
6703 {
6704 return memmove(dst, src, n);
6705 }
6706
// strncmp: compare at most n characters; stops at the first difference
// or NUL; result semantics match the C library.
6707 int
6708 strncmp(const char *p, const char *q, uint n)
6709 {
6710 while(n > 0 && *p && *p == *q)
6711 n--, p++, q++;
6712 if(n == 0)
6713 return 0;
6714 return (uchar)*p - (uchar)*q;
6715 }
6716
// strncpy: copy up to n chars then zero-pad; like the C library version
// it does NOT guarantee NUL-termination when t fills the buffer.
// NOTE(review): n is signed int here, so a huge uint caller value would
// wrap -- callers appear to pass small constants.
6717 char*
6718 strncpy(char *s, const char *t, int n)
6719 {
6720 char *os;
6721
6722 os = s;
6723 while(n-- > 0 && (*s++ = *t++) != 0)
6724 ;
6725 while(n-- > 0)
6726 *s++ = 0;
6727 return os;
6728 }
6729
6730 // Like strncpy but guaranteed to NUL-terminate.
6731 char*
6732 safestrcpy(char *s, const char *t, int n)
6733 {
6734 char *os;
6735
6736 os = s;
6737 if(n <= 0)
6738 return os;
6739 while(--n > 0 && (*s++ = *t++) != 0)
6740 ;
6741 *s = 0;
6742 return os;
6743 }
6744
6745
6746
6747
6748
6749
// strlen: count characters before the terminating NUL.
6750 int
6751 strlen(const char *s)
6752 {
6753 int n;
6754
6755 for(n = 0; s[n]; n++)
6756 ;
6757 return n;
6758 }
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
6780
6781
6782
6783
6784
6785
6786
6787
6788
6789
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
| animesh2049/xv6 | fmt/string.c | C | mit | 2,236 |
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>
#include<ctype.h>
/* Shared game state: loop indices reused by wheel(), and the bankroll
 * (player starts with $100). */
int i,j,cash=100;
void wheel();
void game(int r);
void game_over();
int suit_bet();
int cash_bet();
int roll_wheel();
int roll_dice();
void wheel_count(int c,int h,int s);
void dice_count(int d,int h);
/* 9x9 wheel of code-page-437 glyph codes: 32 = null space, 2 = jack,
 * 3..6 = suit symbols, 15 = bull's eye at the centre.  roll_wheel() later
 * overwrites a hit cell with 249 (a small dot glyph). */
int w[9][9]={
{32,32,32,32,2,32,32,32,32},
{32,32,32,3,5,4,32,32,32},
{32,32,5,4,3,6,5,32,32},
{32,4,3,6,5,3,4,6,32},
{2,6,5,4,15,5,3,6,2},
{32,3,6,3,4,5,4,3,32},
{32,32,5,6,3,4,5,32,32},
{32,32,32,4,6,6,32,32,32},
{32,32,32,32,2,32,32,32,32}
};
/*
 * Entry point: prints the intro and instructions, seeds the PRNG, then
 * runs the requested number of betting rounds.
 *
 * Fixes over the original:
 *  - int main: void main() is not a standard signature
 *  - round is initialized and scanf's result is checked (an input failure
 *    previously left round indeterminate -> undefined behavior)
 *  - srand() so each run produces a different wheel sequence (rand() was
 *    never seeded, so every game was identical)
 */
int main(void){
	int round = 0;
	char e;

	srand((unsigned)time(NULL));
	/* Game intro. */
	printf("\t\t\tWelcome to Roullette\n\n");
	/* Game instructions. */
	printf("Game Instructions:\n\n");
	printf("Diamond(d)=%c Hearts(h)=%c Clubs(c)=%c Spades(s)=%c Jack(j)=%c Bull's Eye=%c \n",4,3,5,6,2,15);
	printf("\n-The game starts with $100 \n-You chooses how many rounds to play \n-Then bet with cash on Suits,Jack and Null spaces of the wheel on which the dice will hit \n-A dice is thrown \n-If the dice hits the betting suit then you earn the betting cash.\n");
	printf("-If the dice hits suits other than the betting one then you lose $10\n");
	printf("-If it hits any of the Null spaces which is not bet then you lose bet cash \n");
	printf("-If it hits the Jack which is bet then you earn the beting cash + $100 otherwise you earn only the bet cash \n");
	printf("-Your cash is doubled if you hit the Bull's Eye \n");
	printf("\n\n");
	printf("Press Enter to Start Game");
	if (scanf("%c",&e) != 1)
		e = 0;		/* EOF: treat as a wrong key below */
	if (e=='\n'){
		printf("\nThe Roullette Wheel: \n\n");
		wheel();
		printf("\n\nYour Cash: $%d",cash);
		printf("\n\nHow many rounds you want to play: ");
		if (scanf("%d",&round) != 1 || round < 0)
			round = 0;	/* bad input: play no rounds instead of UB */
		printf("\n\nYour Cash : $%d \n",cash);
		game(round);
		printf("\n\n");
		printf("\t %d rounds completed!! \n\n\tYou Earned Total $%d !!\n\n",round,cash);
	}
	else{
		printf("\nSorry!!\nYou Entered The Wrong Key!!\n");
	}
	return 0;
}
//game on
void game(int r){
int count;
for(count=1;count<=r;count++){
int suit,ca,hit,dice;
fflush(stdin);
suit=suit_bet();
ca=cash_bet();
hit=roll_wheel();
dice=roll_dice();
wheel_count(ca,hit,suit);
dice_count(dice,hit);
printf("\n");
wheel();
printf("\n\nCash: $%d \nSuit Bet: %c \nCash Bet: $%d \nWheel Hit: %c \nDice: %d\n\n\n",cash,suit,ca,hit,dice);
}
}
/* Render the 9x9 wheel, one glyph plus a space per cell, one row per
 * line.  Uses the file-scope indices i,j exactly like the rest of the
 * program. */
void wheel(){
	for(i=0;i<9;i++){
		for(j=0;j<9;j++){
			putchar(w[i][j]);
			putchar(' ');
		}
		putchar('\n');
	}
}
/* Read one character naming the bet target and translate it to the wheel
 * glyph code: h->3 d->4 c->5 s->6 j->2 n->32 (null space); anything else
 * yields 0 (no valid bet). */
int suit_bet(){
	static const char keys[] = "hdcsjn";
	static const int codes[] = {3, 4, 5, 6, 2, 32};
	char s;
	int k;

	printf("Suit Bet: ");
	s = getchar();
	s = tolower(s);
	for (k = 0; keys[k] != '\0'; k++) {
		if (keys[k] == s)
			return codes[k];
	}
	return 0;
}
/*
 * Prompt for and read the cash bet.
 *
 * Fix: c was returned uninitialized when scanf failed (non-numeric input
 * or EOF) -- undefined behavior.  It is now initialized and the scanf
 * result is checked; a failed read counts as a $0 bet.
 */
int cash_bet(){
	int c = 0;
	printf("Cash Bet: $");
	if (scanf("%d",&c) != 1)
		c = 0;
	return c;
}
/*
 * Pick a uniformly random wheel cell, return its glyph code and mark the
 * cell as hit (249, a dot glyph) so wheel() shows where the ball landed.
 *
 * Fix: the original computed rand()/32768.0 and scaled by 9, which assumes
 * RAND_MAX == 32767 (16-bit DOS).  On platforms where RAND_MAX is larger
 * (e.g. glibc's 2^31-1) the quotient exceeds 1.0 and the resulting indices
 * run far past w[8][8] -- an out-of-bounds read AND write.  rand() % 9 is
 * correct for any RAND_MAX.
 */
int roll_wheel(){
	int wh1,wh2,res;
	wh1 = rand() % 9;
	wh2 = rand() % 9;
	res = w[wh1][wh2];
	w[wh1][wh2] = 249;
	return res;
}
/*
 * Throw a six-sided die: returns a value in [1, 6].
 *
 * Fix: the original computed rand()/32768.0*6+1, which assumes
 * RAND_MAX == 32767; with a larger RAND_MAX (glibc: 2^31-1) the result
 * could be far greater than 6, breaking dice_count().  rand() % 6 + 1 is
 * correct for any RAND_MAX.
 */
int roll_dice(){
	return rand() % 6 + 1;
}
/* Settle the wheel bet against the bankroll.
 *   c - cash bet, h - glyph the ball hit, s - glyph bet on
 * Win on an exact match (+c, or +c+100 for the jack); a miss costs $10 on
 * a suit, the bet on a null space, but pays +c when the unbet jack is hit.
 * Betting on the jack and missing it costs an extra $50. */
void wheel_count(int c,int h,int s){
	if (h == s) {
		cash += (h == 2) ? c + 100 : c;
	} else {
		switch (h) {
		case 3:
		case 4:
		case 5:
		case 6:
			cash -= 10;
			break;
		case 32:
			cash -= c;
			break;
		case 2:
			cash += c;
			break;
		default:
			break;
		}
	}
	if (s == 2 && h != 2)
		cash -= 50;
}
/* Settle the dice throw: the bull's eye (15) doubles the bankroll; a six
 * thrown onto a suit cell (3..6) or an already-hit cell (249) pays $20. */
void dice_count(int d,int h){
	if (h == 15) {
		cash *= 2;
	} else if (d == 6 && (h == 249 || (h >= 3 && h <= 6))) {
		cash += 20;
	}
}
//game end/over
| abrarShariar/Roll-the-dice | final.c | C | mit | 4,262 |
#include "utlua.h"
#ifdef __linux__
#include <limits.h>
#include <linux/netfilter_ipv4.h>
#endif
#include <net/if.h>
#define LUA_TCPD_CONNECTION_TYPE "<tcpd.connect>"
#define LUA_TCPD_SERVER_TYPE "<tcpd.bind %s %d>"
#define LUA_TCPD_ACCEPT_TYPE "<tcpd.accept %s %d>"
#if FAN_HAS_OPENSSL
/* Reference-counted SSL_CTX cached in the Lua registry under `key`
 * (built from cainfo/capath/pkcs12 options in tcpd_connect). */
typedef struct
{
SSL_CTX *ssl_ctx;
char *key;
int retainCount;
} SSLCTX;
#endif
/* Outbound connection state backing a <tcpd.connect> userdata. */
typedef struct
{
struct bufferevent *buf;
#if FAN_HAS_OPENSSL
SSLCTX *sslctx;
int ssl_verifyhost;
int ssl_verifypeer;
const char *ssl_error;
#endif
lua_State *mainthread;
int onReadRef;
int onSendReadyRef;
int onDisconnectedRef;
int onConnectedRef;
char *host;
char *ssl_host;
int port;
int send_buffer_size;
int receive_buffer_size;
int interface;
lua_Number read_timeout;
lua_Number write_timeout;
} Conn;
#if FAN_HAS_OPENSSL
#define VERIFY_DEPTH 5
/* SSL ex_data slot used to attach a Conn* to its SSL object (see
 * luatcpd_reconnect / ssl_verifypeer_cb). */
static int conn_index = 0;
#endif
/* Listening server state backing a <tcpd.bind> userdata. */
typedef struct
{
struct evconnlistener *listener;
lua_State *mainthread;
int onAcceptRef;
int onSSLHostNameRef;
char *host;
int port;
int ipv6;
#if FAN_HAS_OPENSSL
int ssl;
SSL_CTX *ctx;
EC_KEY *ecdh;
#endif
int send_buffer_size;
int receive_buffer_size;
} SERVER;
/* Accepted client connection state backing a <tcpd.accept> userdata. */
typedef struct
{
struct bufferevent *buf;
lua_State *mainthread;
int onReadRef;
int onSendReadyRef;
int selfRef;
char ip[INET6_ADDRSTRLEN];
int port;
int onDisconnectedRef;
} ACCEPT;
/* Release every Lua registry reference held by an ACCEPT (callbacks and
 * its self-reference). */
#define TCPD_ACCEPT_UNREF(accept) \
CLEAR_REF(accept->mainthread, accept->onSendReadyRef) \
CLEAR_REF(accept->mainthread, accept->onReadRef) \
CLEAR_REF(accept->mainthread, accept->onDisconnectedRef) \
CLEAR_REF(accept->mainthread, accept->selfRef)
/*
 * serv:close() -- drop the Lua callback refs, free the host string, the
 * libevent listener and (when built with OpenSSL) the server SSL context.
 * Safe to call more than once: every freed field is NULLed.
 */
LUA_API int lua_tcpd_server_close(lua_State *L)
{
SERVER *serv = luaL_checkudata(L, 1, LUA_TCPD_SERVER_TYPE);
CLEAR_REF(L, serv->onAcceptRef)
CLEAR_REF(L, serv->onSSLHostNameRef)
if (serv->host)
{
free(serv->host);
serv->host = NULL;
}
/* Only touch the listener while an event base is still alive. */
if (event_mgr_base_current() && serv->listener)
{
evconnlistener_free(serv->listener);
serv->listener = NULL;
}
#if FAN_HAS_OPENSSL
if (serv->ctx)
{
SSL_CTX_free(serv->ctx);
EC_KEY_free(serv->ecdh);
serv->ctx = NULL;
serv->ecdh = NULL;
}
#endif
return 0;
}
/* __gc metamethod: identical to serv:close(). */
LUA_API int lua_tcpd_server_gc(lua_State *L)
{
return lua_tcpd_server_close(L);
}
/* __tostring for an accepted client: "<tcpd.accept ip port>". */
LUA_API int lua_tcpd_accept_tostring(lua_State *L)
{
ACCEPT *accept = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
lua_pushfstring(L, LUA_TCPD_ACCEPT_TYPE, accept->ip, accept->port);
return 1;
}
/*
 * __tostring for a server: "<tcpd.bind host port>" using the listener's
 * bound address, or a placeholder when the listener is gone.
 *
 * Fix: the no-listener branch passed a single integer 0 to a format that
 * consumes "%s" (a const char *) and "%d" -- lua_pushfstring would read a
 * null/garbage pointer for %s and an unsupplied vararg for %d, which is
 * undefined behavior.  Both format arguments are now supplied.
 */
LUA_API int lua_tcpd_server_tostring(lua_State *L)
{
    SERVER *serv = luaL_checkudata(L, 1, LUA_TCPD_SERVER_TYPE);
    if (serv->listener)
    {
        char host[INET6_ADDRSTRLEN];
        regress_get_socket_host(evconnlistener_get_fd(serv->listener), host);
        lua_pushfstring(
            L, LUA_TCPD_SERVER_TYPE, host,
            regress_get_socket_port(evconnlistener_get_fd(serv->listener)));
    }
    else
    {
        lua_pushfstring(L, LUA_TCPD_SERVER_TYPE, "-", 0);
    }
    return 1;
}
/*
 * bufferevent event callback for accepted clients.  On error, EOF or
 * timeout: free the bufferevent, fire the Lua ondisconnected callback once
 * with a human-readable reason, then release every registry ref the ACCEPT
 * holds (including its self-ref, letting the userdata be collected).
 * Other events (e.g. BEV_EVENT_CONNECTED) are ignored.
 */
static void tcpd_accept_eventcb(struct bufferevent *bev, short events,
void *arg)
{
ACCEPT *accept = (ACCEPT *)arg;
if (events & BEV_EVENT_ERROR || events & BEV_EVENT_EOF ||
events & BEV_EVENT_TIMEOUT)
{
if (events & BEV_EVENT_ERROR)
{
#if DEBUG
printf("BEV_EVENT_ERROR %s\n",
evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
#endif
}
bufferevent_free(bev);
accept->buf = NULL;
if (accept->onDisconnectedRef != LUA_NOREF)
{
lua_State *mainthread = accept->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, accept->onDisconnectedRef);
/* Pick the most specific reason available as the callback argument. */
if (events & BEV_EVENT_ERROR && EVUTIL_SOCKET_ERROR())
{
lua_pushstring(co,
evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
}
else if (events & BEV_EVENT_TIMEOUT)
{
lua_pushstring(co, "timeout");
}
else if (events & BEV_EVENT_EOF)
{
lua_pushstring(co, "client disconnected");
}
else
{
lua_pushnil(co);
}
/* The ref is cleared before resuming, so the callback fires at most once. */
CLEAR_REF(mainthread, accept->onDisconnectedRef)
FAN_RESUME(co, mainthread, 1);
POP_REF(mainthread);
}
TCPD_ACCEPT_UNREF(accept)
}
else
{
}
}
/* Chunk size used when draining evbuffers into a BYTEARRAY. */
#define BUFLEN 1024
/*
 * Read callback for accepted clients: drain the whole input evbuffer into
 * a temporary BYTEARRAY and deliver it to the Lua onread callback as a
 * single string.  The BYTEARRAY is always deallocated, even when no
 * callback is registered.
 */
static void tcpd_accept_readcb(struct bufferevent *bev, void *ctx)
{
ACCEPT *accept = (ACCEPT *)ctx;
char buf[BUFLEN];
int n;
BYTEARRAY ba = {0};
bytearray_alloc(&ba, BUFLEN * 2);
struct evbuffer *input = bufferevent_get_input(bev);
while ((n = evbuffer_remove(input, buf, sizeof(buf))) > 0)
{
bytearray_writebuffer(&ba, buf, n);
}
/* Switch the BYTEARRAY from write mode to read mode before consuming. */
bytearray_read_ready(&ba);
if (accept->onReadRef != LUA_NOREF)
{
lua_State *mainthread = accept->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, accept->onReadRef);
lua_pushlstring(co, (const char *)ba.buffer, ba.total);
FAN_RESUME(co, mainthread, 1);
POP_REF(mainthread);
}
bytearray_dealloc(&ba);
}
/*
 * Write callback for accepted clients: once the output buffer has fully
 * drained, notify Lua via onsendready.
 */
static void tcpd_accept_writecb(struct bufferevent *bev, void *ctx)
{
ACCEPT *accept = (ACCEPT *)ctx;
if (evbuffer_get_length(bufferevent_get_output(bev)) == 0)
{
if (accept->onSendReadyRef != LUA_NOREF)
{
lua_State *mainthread = accept->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, accept->onSendReadyRef);
FAN_RESUME(co, mainthread, 0);
POP_REF(mainthread);
}
}
}
/*
 * evconnlistener callback: wrap the accepted fd in a bufferevent (SSL or
 * plain), build an ACCEPT userdata, record the peer address, and hand the
 * userdata to the Lua onaccept callback.  If no onaccept callback is
 * registered the accepted fd is ignored entirely.
 */
void connlistener_cb(struct evconnlistener *listener, evutil_socket_t fd,
struct sockaddr *addr, int socklen, void *arg)
{
SERVER *serv = (SERVER *)arg;
if (serv->onAcceptRef != LUA_NOREF)
{
lua_State *mainthread = serv->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, serv->onAcceptRef);
/* The ACCEPT userdata lives on the coroutine stack; callbacks are
 * attached later via accept:bind(). */
ACCEPT *accept = lua_newuserdata(co, sizeof(ACCEPT));
memset(accept, 0, sizeof(ACCEPT));
accept->buf = NULL;
accept->mainthread = mainthread;
accept->selfRef = LUA_NOREF;
accept->onReadRef = LUA_NOREF;
accept->onSendReadyRef = LUA_NOREF;
accept->onDisconnectedRef = LUA_NOREF;
luaL_getmetatable(co, LUA_TCPD_ACCEPT_TYPE);
lua_setmetatable(co, -2);
struct event_base *base = evconnlistener_get_base(listener);
struct bufferevent *bev;
#if FAN_HAS_OPENSSL
if (serv->ssl && serv->ctx)
{
bev = bufferevent_openssl_socket_new(
base, fd, SSL_new(serv->ctx), BUFFEREVENT_SSL_ACCEPTING,
BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);
}
else
{
#endif
bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);
#if FAN_HAS_OPENSSL
}
#endif
bufferevent_setcb(bev, tcpd_accept_readcb, tcpd_accept_writecb,
tcpd_accept_eventcb, accept);
bufferevent_enable(bev, EV_READ | EV_WRITE);
/* Apply the per-server socket buffer sizes to the accepted fd. */
if (serv->send_buffer_size)
{
setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &serv->send_buffer_size,
sizeof(serv->send_buffer_size));
}
if (serv->receive_buffer_size)
{
setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &serv->receive_buffer_size,
sizeof(serv->receive_buffer_size));
}
/* Record the peer address in printable form for accept:remote(). */
memset(accept->ip, 0, INET6_ADDRSTRLEN);
if (addr->sa_family == AF_INET)
{
struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
inet_ntop(addr_in->sin_family, (void *)&(addr_in->sin_addr), accept->ip,
INET_ADDRSTRLEN);
accept->port = ntohs(addr_in->sin_port);
}
else
{
struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)addr;
inet_ntop(addr_in->sin6_family, (void *)&(addr_in->sin6_addr), accept->ip,
INET6_ADDRSTRLEN);
accept->port = ntohs(addr_in->sin6_port);
}
accept->buf = bev;
FAN_RESUME(co, mainthread, 1);
POP_REF(mainthread);
}
}
/*
 * accept:bind{onread=..., onsendready=..., ondisconnected=...}
 * Attaches the Lua callbacks to an accepted connection and pins the
 * userdata in the registry (selfRef) so it outlives the Lua caller while
 * the socket is open.  Returns the peer ip and port.
 */
LUA_API int tcpd_accept_bind(lua_State *L)
{
ACCEPT *accept = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
luaL_checktype(L, 2, LUA_TTABLE);
lua_settop(L, 2);
lua_pushvalue(L, 1);
accept->selfRef = luaL_ref(L, LUA_REGISTRYINDEX);
SET_FUNC_REF_FROM_TABLE(L, accept->onReadRef, 2, "onread")
SET_FUNC_REF_FROM_TABLE(L, accept->onSendReadyRef, 2, "onsendready")
SET_FUNC_REF_FROM_TABLE(L, accept->onDisconnectedRef, 2, "ondisconnected")
lua_pushstring(L, accept->ip);
lua_pushinteger(L, accept->port);
return 2;
}
#if FAN_HAS_OPENSSL
/*
 * TLS SNI callback: forwards the client-supplied server name to the Lua
 * onsslhostname callback (informational only -- the handshake always
 * proceeds with SSL_TLSEXT_ERR_OK).
 */
static int ssl_servername_cb(SSL *s, int *ad, void *arg)
{
const char *hostname = SSL_get_servername(s, TLSEXT_NAMETYPE_host_name);
// if (hostname)
//   printf("Hostname in TLS extension: \"%s\"\n", hostname);
SERVER *serv = (SERVER *)arg;
if (hostname && serv->onSSLHostNameRef != LUA_NOREF)
{
lua_State *mainthread = serv->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, serv->onSSLHostNameRef);
lua_pushstring(co, hostname);
FAN_RESUME(co, mainthread, 1);
POP_REF(mainthread);
}
// if (!p->servername)
//   return SSL_TLSEXT_ERR_NOACK;
// if (servername) {
//   if (strcasecmp(servername, p->servername))
//     return p->extension_error;
// }
return SSL_TLSEXT_ERR_OK;
}
#endif
/*
 * (Re)create the listening socket for serv.  If a host was given it is
 * resolved with evutil_getaddrinfo; otherwise the wildcard IPv4/IPv6
 * address is bound.  Any existing listener is freed first, so this also
 * serves serv:rebind() after a fork or address change.
 *
 * NOTE(review): the getaddrinfo hints request SOCK_DGRAM/IPPROTO_UDP even
 * though the result is used for a TCP listener -- presumably only the
 * resolved address matters here; confirm before changing.
 */
static void tcpd_server_rebind(lua_State *L, SERVER *serv)
{
if (serv->listener)
{
evconnlistener_free(serv->listener);
serv->listener = NULL;
}
if (serv->host)
{
char portbuf[6];
evutil_snprintf(portbuf, sizeof(portbuf), "%d", serv->port);
struct evutil_addrinfo hints = {0};
struct evutil_addrinfo *answer = NULL;
hints.ai_family = serv->ipv6 ? AF_INET6 : AF_INET;
hints.ai_socktype = SOCK_DGRAM;
hints.ai_protocol = IPPROTO_UDP;
hints.ai_flags = EVUTIL_AI_ADDRCONFIG;
int err = evutil_getaddrinfo(serv->host, portbuf, &hints, &answer);
if (err < 0 || !answer)
{
luaL_error(L, "invaild bind address %s:%d", serv->host, serv->port);
}
serv->listener =
evconnlistener_new_bind(event_mgr_base(), connlistener_cb, serv,
LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
answer->ai_addr, answer->ai_addrlen);
evutil_freeaddrinfo(answer);
}
else
{
/* No host given: bind the wildcard address for the chosen family. */
struct sockaddr *addr = NULL;
size_t addr_size = 0;
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
memset(&sin, 0, sizeof(sin));
memset(&sin6, 0, sizeof(sin6));
if (!serv->ipv6)
{
addr = (struct sockaddr *)&sin;
addr_size = sizeof(sin);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = htonl(0);
sin.sin_port = htons(serv->port);
}
else
{
addr = (struct sockaddr *)&sin6;
addr_size = sizeof(sin6);
sin6.sin6_family = AF_INET6;
// sin6.sin6_addr.s6_addr
sin6.sin6_port = htons(serv->port);
}
serv->listener = evconnlistener_new_bind(
event_mgr_base(), connlistener_cb, serv,
LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1, addr, addr_size);
}
}
/* serv:rebind() -- Lua wrapper around tcpd_server_rebind(). */
LUA_API int lua_tcpd_server_rebind(lua_State *L)
{
SERVER *serv = luaL_checkudata(L, 1, LUA_TCPD_SERVER_TYPE);
tcpd_server_rebind(L, serv);
return 0;
}
/*
 * tcpd.bind{host=..., port=..., onaccept=..., ssl=..., cert=..., key=...,
 *           onsslhostname=..., send_buffer_size=..., receive_buffer_size=...,
 *           ipv6=...}
 * Creates a SERVER userdata, optionally sets up a TLS context (cert+key,
 * ECDHE on prime256v1, SNI callback), and binds the listener.  Returns the
 * server userdata and the bound port on success, nothing on bind failure.
 */
LUA_API int tcpd_bind(lua_State *L)
{
event_mgr_init();
luaL_checktype(L, 1, LUA_TTABLE);
lua_settop(L, 1);
SERVER *serv = lua_newuserdata(L, sizeof(SERVER));
memset(serv, 0, sizeof(SERVER));
luaL_getmetatable(L, LUA_TCPD_SERVER_TYPE);
lua_setmetatable(L, -2);
serv->mainthread = utlua_mainthread(L);
SET_FUNC_REF_FROM_TABLE(L, serv->onAcceptRef, 1, "onaccept")
SET_FUNC_REF_FROM_TABLE(L, serv->onSSLHostNameRef, 1, "onsslhostname")
DUP_STR_FROM_TABLE(L, serv->host, 1, "host")
SET_INT_FROM_TABLE(L, serv->port, 1, "port")
lua_getfield(L, 1, "ssl");
int ssl = lua_toboolean(L, -1);
lua_pop(L, 1);
#if FAN_HAS_OPENSSL
serv->ssl = ssl;
if (serv->ssl)
{
lua_getfield(L, 1, "cert");
const char *cert = lua_tostring(L, -1);
lua_getfield(L, 1, "key");
const char *key = lua_tostring(L, -1);
if (cert && key)
{
SSL_CTX *ctx = SSL_CTX_new(TLS_server_method());
/* SNI: report the requested server name to Lua. */
SSL_CTX_set_tlsext_servername_callback(ctx, ssl_servername_cb);
SSL_CTX_set_tlsext_servername_arg(ctx, serv);
serv->ctx = ctx;
SSL_CTX_set_options(ctx,
SSL_OP_SINGLE_DH_USE | SSL_OP_SINGLE_ECDH_USE |
0); // SSL_OP_NO_SSLv2
serv->ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
if (!serv->ecdh)
{
die_most_horribly_from_openssl_error("EC_KEY_new_by_curve_name");
}
if (1 != SSL_CTX_set_tmp_ecdh(ctx, serv->ecdh))
{
die_most_horribly_from_openssl_error("SSL_CTX_set_tmp_ecdh");
}
server_setup_certs(ctx, cert, key);
}
lua_pop(L, 2);
}
#else
if (ssl)
{
luaL_error(L, "ssl is not supported on micro version.");
}
#endif
SET_INT_FROM_TABLE(L, serv->send_buffer_size, 1, "send_buffer_size")
SET_INT_FROM_TABLE(L, serv->receive_buffer_size, 1, "receive_buffer_size")
lua_getfield(L, 1, "ipv6");
serv->ipv6 = lua_toboolean(L, -1);
lua_pop(L, 1);
tcpd_server_rebind(L, serv);
if (!serv->listener)
{
return 0;
}
else
{
/* Port 0 means "pick any": read back the kernel-assigned port. */
if (!serv->port)
{
serv->port = regress_get_socket_port(evconnlistener_get_fd(serv->listener));
}
lua_pushinteger(L, serv->port);
/* Returns the SERVER userdata (still on the stack) plus the port. */
return 2;
}
}
/*
 * Read callback for outbound connections: drain the input evbuffer into a
 * temporary BYTEARRAY and deliver the bytes to the Lua onread callback as
 * one string.
 *
 * Fix: `BYTEARRAY ba;` was left uninitialized before bytearray_alloc(),
 * unlike the otherwise-identical tcpd_accept_readcb which uses `= {0}`;
 * zero-initialize here too so bytearray_alloc never sees indeterminate
 * fields.
 */
static void tcpd_conn_readcb(struct bufferevent *bev, void *ctx)
{
    Conn *conn = (Conn *)ctx;
    char buf[BUFLEN];
    int n;
    BYTEARRAY ba = {0};
    bytearray_alloc(&ba, BUFLEN * 2);
    struct evbuffer *input = bufferevent_get_input(bev);
    while ((n = evbuffer_remove(input, buf, sizeof(buf))) > 0)
    {
        bytearray_writebuffer(&ba, buf, n);
    }
    /* Switch the BYTEARRAY from write mode to read mode before consuming. */
    bytearray_read_ready(&ba);
    if (conn->onReadRef != LUA_NOREF)
    {
        lua_State *mainthread = conn->mainthread;
        lua_lock(mainthread);
        lua_rawgeti(mainthread, LUA_REGISTRYINDEX, conn->onReadRef);
        lua_State *co = lua_newthread(mainthread);
        PUSH_REF(mainthread);
        lua_xmove(mainthread, co, 1);
        lua_unlock(mainthread);
        lua_pushlstring(co, (const char *)ba.buffer, ba.total);
        FAN_RESUME(co, mainthread, 1);
        POP_REF(mainthread);
    }
    bytearray_dealloc(&ba);
}
/*
 * Write callback for outbound connections: once the output buffer fully
 * drains, notify Lua via onsendready.
 */
static void tcpd_conn_writecb(struct bufferevent *bev, void *ctx)
{
Conn *conn = (Conn *)ctx;
if (evbuffer_get_length(bufferevent_get_output(bev)) == 0)
{
if (conn->onSendReadyRef != LUA_NOREF)
{
lua_State *mainthread = conn->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, conn->onSendReadyRef);
FAN_RESUME(co, mainthread, 0);
POP_REF(mainthread);
}
}
}
/*
 * bufferevent event callback for outbound connections.
 *  - BEV_EVENT_CONNECTED: fire the Lua onconnected callback.
 *  - error/EOF/timeout: shut down TLS if present, free the bufferevent and
 *    fire ondisconnected with the most specific reason available (timeout
 *    direction, SSL verification error, DNS error, socket error, or EOF).
 * Unlike the accept path, the callback refs are NOT released here -- the
 * Conn can be reconnected later via luatcpd_reconnect().
 */
static void tcpd_conn_eventcb(struct bufferevent *bev, short events,
void *arg)
{
Conn *conn = (Conn *)arg;
if (events & BEV_EVENT_CONNECTED)
{
// printf("tcp connected.\n");
if (conn->onConnectedRef != LUA_NOREF)
{
lua_State *mainthread = conn->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, conn->onConnectedRef);
FAN_RESUME(co, mainthread, 0);
POP_REF(mainthread);
}
}
else if (events & BEV_EVENT_ERROR || events & BEV_EVENT_EOF ||
events & BEV_EVENT_TIMEOUT)
{
#if FAN_HAS_OPENSSL
/* Mark the TLS session closed so SSL_shutdown does not block. */
SSL *ssl = bufferevent_openssl_get_ssl(bev);
if (ssl)
{
SSL_set_shutdown(ssl, SSL_RECEIVED_SHUTDOWN);
SSL_shutdown(ssl);
}
#endif
bufferevent_free(bev);
conn->buf = NULL;
if (conn->onDisconnectedRef != LUA_NOREF)
{
lua_State *mainthread = conn->mainthread;
lua_lock(mainthread);
lua_State *co = lua_newthread(mainthread);
PUSH_REF(mainthread);
lua_unlock(mainthread);
lua_rawgeti(co, LUA_REGISTRYINDEX, conn->onDisconnectedRef);
if (events & BEV_EVENT_TIMEOUT)
{
if (events & BEV_EVENT_READING)
{
lua_pushliteral(co, "read timeout");
}
else if (events & BEV_EVENT_WRITING)
{
lua_pushliteral(co, "write timeout");
}
else
{
lua_pushliteral(co, "unknown timeout");
}
}
else if (events & BEV_EVENT_ERROR)
{
#if FAN_HAS_OPENSSL
/* ssl_error is set by ssl_verifypeer_cb on certificate failure. */
if (conn->ssl_error)
{
lua_pushfstring(co, "SSLError: %s", conn->ssl_error);
}
else
{
#endif
int err = bufferevent_socket_get_dns_error(bev);
if (err)
{
lua_pushstring(co, evutil_gai_strerror(err));
}
else if (EVUTIL_SOCKET_ERROR())
{
lua_pushstring(
co, evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
}
else
{
lua_pushnil(co);
}
#if FAN_HAS_OPENSSL
}
#endif
}
else if (events & BEV_EVENT_EOF)
{
lua_pushliteral(co, "server disconnected");
}
else
{
lua_pushnil(co);
}
FAN_RESUME(co, mainthread, 1);
POP_REF(mainthread);
}
}
}
#if FAN_HAS_OPENSSL
/*
 * Certificate-verification callback: on failure, record a human-readable
 * reason on the owning Conn (retrieved via the conn_index ex_data slot) so
 * tcpd_conn_eventcb can report "SSLError: ..." to Lua.
 *
 * Fix: the previous code strdup()'d the message and the copy was never
 * freed (luatcpd_reconnect and tcpd_conn_close reset ssl_error without
 * freeing it).  X509_verify_cert_error_string() returns a pointer to a
 * static constant string, so it can be stored directly with no allocation.
 */
static int ssl_verifypeer_cb(int preverify_ok, X509_STORE_CTX *ctx)
{
    if (!preverify_ok)
    {
        SSL *ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx());
        Conn *conn = SSL_get_ex_data(ssl, conn_index);
        int err = X509_STORE_CTX_get_error(ctx);
        conn->ssl_error = X509_verify_cert_error_string(err);
    }
    return preverify_ok;
}
#endif
/*
 * (Re)establish conn's socket: free any previous bufferevent, create a new
 * plain or TLS bufferevent, start an async hostname connect, and apply the
 * configured socket options.  On immediate failure the bufferevent is
 * freed and conn->buf stays NULL; async failures surface later through
 * tcpd_conn_eventcb.
 */
static void luatcpd_reconnect(Conn *conn)
{
if (conn->buf)
{
bufferevent_free(conn->buf);
conn->buf = NULL;
}
#if FAN_HAS_OPENSSL
conn->ssl_error = 0;
if (conn->sslctx)
{
SSL *ssl = SSL_new(conn->sslctx->ssl_ctx);
/* Attach the Conn so ssl_verifypeer_cb can report back to it. */
SSL_set_ex_data(ssl, conn_index, conn);
/* Hostname checks use ssl_host when given, else the connect host. */
if (conn->ssl_verifyhost && (conn->ssl_host ?: conn->host))
{
SSL_set_hostflags(ssl, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
if (!SSL_set1_host(ssl, conn->ssl_host ?: conn->host))
{
printf("SSL_set1_host '%s' failed!\n", conn->ssl_host ?: conn->host);
}
}
/* Enable peer verification (with a non-null callback if desired) */
if (conn->ssl_verifypeer)
{
SSL_set_verify(ssl, SSL_VERIFY_PEER, NULL);
}
else
{
SSL_set_verify(ssl, SSL_VERIFY_NONE, NULL);
}
SSL_set_tlsext_host_name(ssl, conn->ssl_host ?: conn->host);
conn->buf = bufferevent_openssl_socket_new(
event_mgr_base(), -1, ssl, BUFFEREVENT_SSL_CONNECTING,
BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);
#ifdef EVENT__NUMERIC_VERSION
#if (EVENT__NUMERIC_VERSION >= 0x02010500)
bufferevent_openssl_set_allow_dirty_shutdown(conn->buf, 1);
#endif
#endif
}
else
{
#endif
conn->buf = bufferevent_socket_new(
event_mgr_base(), -1, BEV_OPT_CLOSE_ON_FREE | BEV_OPT_DEFER_CALLBACKS);
#if FAN_HAS_OPENSSL
}
#endif
int rc = bufferevent_socket_connect_hostname(conn->buf, event_mgr_dnsbase(),
AF_UNSPEC,
conn->host, conn->port);
/* Apply socket options to the fd created by the connect call. */
evutil_socket_t fd = bufferevent_getfd(conn->buf);
if (conn->send_buffer_size)
{
setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &conn->send_buffer_size,
sizeof(conn->send_buffer_size));
}
if (conn->receive_buffer_size)
{
setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &conn->receive_buffer_size,
sizeof(conn->receive_buffer_size));
}
#ifdef IP_BOUND_IF
/* macOS/BSD: bind the connection to a specific network interface. */
if (conn->interface)
{
setsockopt(fd, IPPROTO_IP, IP_BOUND_IF, &conn->interface, sizeof(conn->interface));
}
#endif
if (rc < 0)
{
LOGE("could not connect to %s:%d %s", conn->host, conn->port,
evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
bufferevent_free(conn->buf);
conn->buf = NULL;
return;
}
bufferevent_enable(conn->buf, EV_WRITE | EV_READ);
bufferevent_setcb(conn->buf, tcpd_conn_readcb, tcpd_conn_writecb,
tcpd_conn_eventcb, conn);
}
/*
 * tcpd.connect{host=..., port=..., onread=..., onsendready=...,
 *              ondisconnected=..., onconnected=..., ssl=..., ssl_host=...,
 *              ssl_verifyhost=..., ssl_verifypeer=..., cainfo=..., capath=...,
 *              ["pkcs12.path"]=..., ["pkcs12.password"]=...,
 *              send_buffer_size=..., receive_buffer_size=...,
 *              read_timeout=..., write_timeout=..., interface=...}
 * Creates a Conn userdata and starts an async connect; returns the
 * userdata.  When ssl is requested, an SSL_CTX is created (or reused) and
 * cached in the Lua registry under a key derived from the CA / PKCS#12
 * options, with a retain count shared across connections.
 */
LUA_API int tcpd_connect(lua_State *L)
{
event_mgr_init();
luaL_checktype(L, 1, LUA_TTABLE);
lua_settop(L, 1);
Conn *conn = lua_newuserdata(L, sizeof(Conn));
memset(conn, 0, sizeof(Conn));
luaL_getmetatable(L, LUA_TCPD_CONNECTION_TYPE);
lua_setmetatable(L, -2);
conn->mainthread = utlua_mainthread(L);
conn->buf = NULL;
#if FAN_HAS_OPENSSL
conn->sslctx = NULL;
conn->ssl_error = 0;
#endif
conn->send_buffer_size = 0;
conn->receive_buffer_size = 0;
SET_FUNC_REF_FROM_TABLE(L, conn->onReadRef, 1, "onread")
SET_FUNC_REF_FROM_TABLE(L, conn->onSendReadyRef, 1, "onsendready")
SET_FUNC_REF_FROM_TABLE(L, conn->onDisconnectedRef, 1, "ondisconnected")
SET_FUNC_REF_FROM_TABLE(L, conn->onConnectedRef, 1, "onconnected")
DUP_STR_FROM_TABLE(L, conn->host, 1, "host")
SET_INT_FROM_TABLE(L, conn->port, 1, "port")
lua_getfield(L, 1, "ssl");
int ssl = lua_toboolean(L, -1);
lua_pop(L, 1);
#if FAN_HAS_OPENSSL
/* Both verification switches default to on. */
lua_getfield(L, 1, "ssl_verifyhost");
conn->ssl_verifyhost = (int)luaL_optinteger(L, -1, 1);
lua_pop(L, 1);
lua_getfield(L, 1, "ssl_verifypeer");
conn->ssl_verifypeer = (int)luaL_optinteger(L, -1, 1);
lua_pop(L, 1);
DUP_STR_FROM_TABLE(L, conn->ssl_host, 1, "ssl_host")
if (ssl)
{
lua_getfield(L, 1, "cainfo");
const char *cainfo = luaL_optstring(L, -1, NULL);
lua_pop(L, 1);
lua_getfield(L, 1, "capath");
const char *capath = luaL_optstring(L, -1, NULL);
lua_pop(L, 1);
lua_getfield(L, 1, "pkcs12.path");
const char *p12path = luaL_optstring(L, -1, NULL);
lua_pop(L, 1);
lua_getfield(L, 1, "pkcs12.password");
const char *p12password = luaL_optstring(L, -1, NULL);
lua_pop(L, 1);
if (!cainfo && !capath)
{
cainfo = "cert.pem";
}
/* Build the registry cache key from all SSL-affecting options.
 * (The "SSL_CTX:" prefix length is taken from an equally long
 * literal "SSL_CTX_" -- same 8 bytes, so the result is correct.) */
BYTEARRAY ba = {0};
bytearray_alloc(&ba, BUFLEN);
bytearray_writebuffer(&ba, "SSL_CTX:", strlen("SSL_CTX_"));
if (cainfo)
{
bytearray_writebuffer(&ba, cainfo, strlen(cainfo));
}
if (capath)
{
bytearray_writebuffer(&ba, capath, strlen(capath));
}
if (p12path)
{
bytearray_writebuffer(&ba, p12path, strlen(p12path));
}
if (p12password)
{
bytearray_writebuffer(&ba, p12password, strlen(p12password));
}
bytearray_write8(&ba, 0);
bytearray_read_ready(&ba);
char *cache_key = strdup((const char *)ba.buffer);
bytearray_dealloc(&ba);
lua_getfield(L, LUA_REGISTRYINDEX, cache_key);
if (lua_isnil(L, -1))
{
/* Cache miss: create, configure and cache a new SSL_CTX. */
SSLCTX *sslctx = lua_newuserdata(L, sizeof(SSLCTX));
sslctx->key = strdup(cache_key);
sslctx->ssl_ctx = SSL_CTX_new(TLS_method());
conn->sslctx = sslctx;
sslctx->retainCount = 1;
lua_setfield(L, LUA_REGISTRYINDEX, cache_key);
if (!SSL_CTX_load_verify_locations(sslctx->ssl_ctx, cainfo, capath))
{
printf("SSL_CTX_load_verify_locations failed: cainfo=%s capath=%s\n", cainfo, capath);
}
#ifdef SSL_MODE_RELEASE_BUFFERS
SSL_CTX_set_mode(sslctx->ssl_ctx, SSL_MODE_RELEASE_BUFFERS);
#endif
SSL_CTX_set_options(sslctx->ssl_ctx, SSL_OP_NO_COMPRESSION);
SSL_CTX_set_verify(sslctx->ssl_ctx, SSL_VERIFY_PEER, ssl_verifypeer_cb);
/* Optional client certificate from a PKCS#12 bundle; the
 * while+break construct is a goto-free early-exit chain. */
while (p12path)
{
FILE *fp = NULL;
EVP_PKEY *pkey = NULL;
X509 *cert = NULL;
STACK_OF(X509) *ca = NULL;
PKCS12 *p12 = NULL;
if ((fp = fopen(p12path, "rb")) == NULL)
{
fprintf(stderr, "Error opening file %s\n", p12path);
break;
}
p12 = d2i_PKCS12_fp(fp, NULL);
fclose(fp);
if (!p12)
{
fprintf(stderr, "Error reading PKCS#12 file\n");
ERR_print_errors_fp(stderr);
break;
}
if (!PKCS12_parse(p12, p12password, &pkey, &cert, &ca))
{
fprintf(stderr, "Error parsing PKCS#12 file\n");
ERR_print_errors_fp(stderr);
}
else
{
SSL_CTX_use_certificate(sslctx->ssl_ctx, cert);
if (ca && sk_X509_num(ca))
{
int i = 0;
for (i = 0; i < sk_X509_num(ca); i++)
{
SSL_CTX_use_certificate(sslctx->ssl_ctx, sk_X509_value(ca, i));
}
}
SSL_CTX_use_PrivateKey(sslctx->ssl_ctx, pkey);
sk_X509_pop_free(ca, X509_free);
X509_free(cert);
EVP_PKEY_free(pkey);
}
PKCS12_free(p12);
p12 = NULL;
break;
}
}
else
{
/* Cache hit: share the existing context and bump its refcount. */
SSLCTX *sslctx = lua_touserdata(L, -1);
sslctx->retainCount++;
conn->sslctx = sslctx;
}
lua_pop(L, 1);
FREE_STR(cache_key);
}
#else
if (ssl)
{
luaL_error(L, "ssl is not supported on micro version.");
}
#endif
SET_INT_FROM_TABLE(L, conn->send_buffer_size, 1, "send_buffer_size")
SET_INT_FROM_TABLE(L, conn->receive_buffer_size, 1, "receive_buffer_size")
lua_getfield(L, 1, "read_timeout");
lua_Number read_timeout = (int)luaL_optnumber(L, -1, 0);
conn->read_timeout = read_timeout;
lua_pop(L, 1);
lua_getfield(L, 1, "write_timeout");
lua_Number write_timeout = (int)luaL_optnumber(L, -1, 0);
conn->write_timeout = write_timeout;
lua_pop(L, 1);
lua_getfield(L, 1, "interface");
if (lua_type(L, -1) == LUA_TSTRING)
{
const char *interface = lua_tostring(L, -1);
conn->interface = if_nametoindex(interface);
}
lua_pop(L, 1);
luatcpd_reconnect(conn);
return 1;
}
/* Module function table for the `tcpd` Lua module. */
static const luaL_Reg tcpdlib[] = {
{"bind", tcpd_bind}, {"connect", tcpd_connect}, {NULL, NULL}};
/*
 * conn:close() -- free the bufferevent, drop all callback refs and
 * strings, and release this connection's share of the cached SSL_CTX
 * (freeing and uncaching it when the retain count reaches zero).
 */
LUA_API int tcpd_conn_close(lua_State *L)
{
Conn *conn = luaL_checkudata(L, 1, LUA_TCPD_CONNECTION_TYPE);
if (event_mgr_base_current() && conn->buf)
{
bufferevent_free(conn->buf);
conn->buf = NULL;
}
CLEAR_REF(L, conn->onReadRef)
CLEAR_REF(L, conn->onSendReadyRef)
CLEAR_REF(L, conn->onDisconnectedRef)
CLEAR_REF(L, conn->onConnectedRef)
FREE_STR(conn->host)
FREE_STR(conn->ssl_host)
#if FAN_HAS_OPENSSL
if (conn->sslctx)
{
conn->sslctx->retainCount--;
if (conn->sslctx->retainCount <= 0)
{
/* Last user: remove the registry cache entry and free the context. */
lua_pushnil(L);
lua_setfield(L, LUA_REGISTRYINDEX, conn->sslctx->key);
SSL_CTX_free(conn->sslctx->ssl_ctx);
free(conn->sslctx->key);
}
conn->sslctx = NULL;
}
#endif
return 0;
}
/* __gc metamethod: identical to conn:close(). */
LUA_API int tcpd_conn_gc(lua_State *L) { return tcpd_conn_close(L); }
/* accept:remote() -- returns {ip=..., port=...} for the peer, as recorded
 * at accept time by connlistener_cb. */
LUA_API int tcpd_accept_remote(lua_State *L)
{
ACCEPT *accept = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
lua_newtable(L);
lua_pushstring(L, accept->ip);
lua_setfield(L, -2, "ip");
lua_pushinteger(L, accept->port);
lua_setfield(L, -2, "port");
return 1;
}
#ifdef __linux__
/*
 * accept:original_dst() -- Linux-only: returns the pre-REDIRECT
 * destination host and port of a connection intercepted by netfilter
 * (SO_ORIGINAL_DST), or nil plus an error message.
 */
LUA_API int tcpd_accept_original_dst(lua_State *L)
{
ACCEPT *accept = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
evutil_socket_t fd = bufferevent_getfd(accept->buf);
struct sockaddr_storage ss;
socklen_t len = sizeof(struct sockaddr_storage);
if (getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, &ss, &len))
{
lua_pushnil(L);
lua_pushfstring(L, "getsockopt: %s", strerror(errno));
return 2;
}
char host[INET6_ADDRSTRLEN];
int port = 0;
if (ss.ss_family == AF_INET)
{
struct sockaddr_in *addr_in = (struct sockaddr_in *)&ss;
port = ntohs(((struct sockaddr_in *)&ss)->sin_port);
inet_ntop(addr_in->sin_family, (void *)&(addr_in->sin_addr), host,
INET_ADDRSTRLEN);
}
else if (ss.ss_family == AF_INET6)
{
struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)&ss;
port = ntohs(((struct sockaddr_in6 *)&ss)->sin6_port);
inet_ntop(addr_in->sin6_family, (void *)&(addr_in->sin6_addr), host,
INET6_ADDRSTRLEN);
}
lua_pushstring(L, host);
lua_pushinteger(L, port);
return 2;
}
#endif
/*
 * accept:getsockname() -- returns the local host and port of the accepted
 * socket, or nil plus an error message on failure.
 */
LUA_API int tcpd_accept_getsockname(lua_State *L)
{
ACCEPT *accept = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
evutil_socket_t fd = bufferevent_getfd(accept->buf);
struct sockaddr_storage ss;
socklen_t len = sizeof(struct sockaddr_storage);
if (getsockname(fd, (struct sockaddr *)&ss, &len))
{
lua_pushnil(L);
lua_pushfstring(L, "getsockname: %s", strerror(errno));
return 2;
}
char host[INET6_ADDRSTRLEN];
int port = 0;
if (ss.ss_family == AF_INET)
{
struct sockaddr_in *addr_in = (struct sockaddr_in *)&ss;
port = ntohs(((struct sockaddr_in *)&ss)->sin_port);
inet_ntop(addr_in->sin_family, (void *)&(addr_in->sin_addr), host,
INET_ADDRSTRLEN);
}
else if (ss.ss_family == AF_INET6)
{
struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)&ss;
port = ntohs(((struct sockaddr_in6 *)&ss)->sin6_port);
inet_ntop(addr_in->sin6_family, (void *)&(addr_in->sin6_addr), host,
INET6_ADDRSTRLEN);
}
lua_pushstring(L, host);
lua_pushinteger(L, port);
return 2;
}
/*
 * accept:close() — free the underlying bufferevent (only while an event
 * base is still current) and drop this binding's reference.
 */
LUA_API int tcpd_accept_close(lua_State *L)
{
    ACCEPT *acc = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
    if (event_mgr_base_current() && acc->buf)
    {
        bufferevent_free(acc->buf);
        acc->buf = NULL; /* mark closed so later calls become no-ops */
    }
    TCPD_ACCEPT_UNREF(acc)
    return 0;
}
/* __gc metamethod for accept userdata: delegate to close(). */
LUA_API int lua_tcpd_accept_gc(lua_State *L)
{
    return tcpd_accept_close(L);
}
/* accept:pause_read() — stop delivering EV_READ events; no-op when closed. */
LUA_API int tcpd_accept_read_pause(lua_State *L)
{
    ACCEPT *acc = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
    if (acc->buf == NULL)
        return 0;
    bufferevent_disable(acc->buf, EV_READ);
    return 0;
}
/* accept:resume_read() — re-enable EV_READ delivery; no-op when closed. */
LUA_API int tcpd_accept_read_resume(lua_State *L)
{
    ACCEPT *acc = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
    if (acc->buf == NULL)
        return 0;
    bufferevent_enable(acc->buf, EV_READ);
    return 0;
}
/* conn:pause_read() — stop delivering EV_READ events; no-op when closed. */
LUA_API int tcpd_conn_read_pause(lua_State *L)
{
    Conn *c = luaL_checkudata(L, 1, LUA_TCPD_CONNECTION_TYPE);
    if (c->buf == NULL)
        return 0;
    bufferevent_disable(c->buf, EV_READ);
    return 0;
}
/* conn:resume_read() — re-enable EV_READ delivery; no-op when closed. */
LUA_API int tcpd_conn_read_resume(lua_State *L)
{
    Conn *c = luaL_checkudata(L, 1, LUA_TCPD_CONNECTION_TYPE);
    if (c->buf == NULL)
        return 0;
    bufferevent_enable(c->buf, EV_READ);
    return 0;
}
/*
 * conn:send(data) -> integer
 * Queues `data` on the outgoing bufferevent and returns the total number
 * of bytes now pending in the output buffer, or -1 when there is nothing
 * to send or the connection is closed.  Configured read/write timeouts
 * (if any) are re-applied before queueing.
 */
LUA_API int tcpd_conn_send(lua_State *L)
{
    Conn *conn = luaL_checkudata(L, 1, LUA_TCPD_CONNECTION_TYPE);
    size_t n = 0;
    const char *payload = luaL_checklstring(L, 2, &n);
    if (!payload || n == 0 || !conn->buf)
    {
        lua_pushinteger(L, -1);
        return 1;
    }
    /* Build NULL-or-pointer timeout arguments; only touch the bufferevent
     * when at least one timeout is configured (same calls as before, with
     * the four-way nesting flattened). */
    struct timeval rtv, wtv;
    struct timeval *rp = NULL;
    struct timeval *wp = NULL;
    if (conn->read_timeout > 0)
    {
        d2tv(conn->read_timeout, &rtv);
        rp = &rtv;
    }
    if (conn->write_timeout > 0)
    {
        d2tv(conn->write_timeout, &wtv);
        wp = &wtv;
    }
    if (rp || wp)
    {
        bufferevent_set_timeouts(conn->buf, rp, wp);
    }
    bufferevent_write(conn->buf, payload, n);
    size_t pending = evbuffer_get_length(bufferevent_get_output(conn->buf));
    lua_pushinteger(L, pending);
    return 1;
}
/* conn:reconnect() — tear down and re-establish the connection. */
LUA_API int tcpd_conn_reconnect(lua_State *L)
{
    Conn *c = luaL_checkudata(L, 1, LUA_TCPD_CONNECTION_TYPE);
    luatcpd_reconnect(c);
    return 0;
}
/*
 * accept:flush([mode]) -> integer
 * Flushes pending output on the accepted connection; `mode` defaults to
 * BEV_NORMAL.  Returns bufferevent_flush()'s result, or -1 when the
 * connection has already been closed.
 */
LUA_API int tcpd_accept_flush(lua_State *L)
{
    ACCEPT *accept = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
    int mode = luaL_optinteger(L, 2, BEV_NORMAL);
    /* fix: every sibling function guards accept->buf against NULL after
     * close(), but this one dereferenced it unconditionally and crashed. */
    if (!accept->buf)
    {
        lua_pushinteger(L, -1);
        return 1;
    }
    lua_pushinteger(L, bufferevent_flush(accept->buf, EV_WRITE, mode));
    return 1;
}
/*
 * accept:send(data) -> integer
 * Queues `data` on the accepted connection's output buffer and returns
 * the total bytes now pending, or -1 when there is nothing to send or
 * the connection is closed.
 */
LUA_API int tcpd_accept_send(lua_State *L)
{
    ACCEPT *acc = luaL_checkudata(L, 1, LUA_TCPD_ACCEPT_TYPE);
    size_t n = 0;
    const char *payload = luaL_checklstring(L, 2, &n);
    if (!payload || n == 0 || !acc->buf)
    {
        lua_pushinteger(L, -1);
        return 1;
    }
    bufferevent_write(acc->buf, payload, n);
    size_t pending = evbuffer_get_length(bufferevent_get_output(acc->buf));
    lua_pushinteger(L, pending);
    return 1;
}
/*
 * Module entry point: builds the three userdata metatables (connection,
 * accept, server) and registers the public "tcpd" function table.
 * The lua_push*/lua_setfield/lua_rawset sequences are stack-order
 * sensitive; each metatable is built, given __index = itself, then popped.
 */
LUA_API int luaopen_fan_tcpd(lua_State *L)
{
#if FAN_HAS_OPENSSL
/* SSL ex-data slot; presumably used to recover the Conn from an SSL*
 * inside OpenSSL callbacks — defined elsewhere in this file, verify there. */
conn_index = SSL_get_ex_new_index(0, "conn_index", NULL, NULL, NULL);
#endif
/* --- connection metatable --- */
luaL_newmetatable(L, LUA_TCPD_CONNECTION_TYPE);
lua_pushcfunction(L, &tcpd_conn_send);
lua_setfield(L, -2, "send");
lua_pushcfunction(L, &tcpd_conn_read_pause);
lua_setfield(L, -2, "pause_read");
lua_pushcfunction(L, &tcpd_conn_read_resume);
lua_setfield(L, -2, "resume_read");
lua_pushcfunction(L, &tcpd_conn_close);
lua_setfield(L, -2, "close");
lua_pushcfunction(L, &tcpd_conn_reconnect);
lua_setfield(L, -2, "reconnect");
/* metatable.__index = metatable, so methods resolve on the userdata */
lua_pushstring(L, "__index");
lua_pushvalue(L, -2);
lua_rawset(L, -3);
lua_pushstring(L, "__gc");
lua_pushcfunction(L, &tcpd_conn_gc);
lua_rawset(L, -3);
lua_pop(L, 1);
/* --- accept metatable --- */
luaL_newmetatable(L, LUA_TCPD_ACCEPT_TYPE);
lua_pushcfunction(L, &tcpd_accept_send);
lua_setfield(L, -2, "send");
lua_pushcfunction(L, &tcpd_accept_flush);
lua_setfield(L, -2, "flush");
lua_pushcfunction(L, &tcpd_accept_close);
lua_setfield(L, -2, "close");
lua_pushcfunction(L, &tcpd_accept_read_pause);
lua_setfield(L, -2, "pause_read");
lua_pushcfunction(L, &tcpd_accept_read_resume);
lua_setfield(L, -2, "resume_read");
lua_pushcfunction(L, &tcpd_accept_bind);
lua_setfield(L, -2, "bind");
lua_pushcfunction(L, &tcpd_accept_remote);
lua_setfield(L, -2, "remoteinfo");
lua_pushcfunction(L, &tcpd_accept_getsockname);
lua_setfield(L, -2, "getsockname");
#ifdef __linux__
/* netfilter SO_ORIGINAL_DST lookup is Linux-only */
lua_pushcfunction(L, &tcpd_accept_original_dst);
lua_setfield(L, -2, "original_dst");
#endif
lua_pushstring(L, "__index");
lua_pushvalue(L, -2);
lua_rawset(L, -3);
lua_pushstring(L, "__tostring");
lua_pushcfunction(L, &lua_tcpd_accept_tostring);
lua_rawset(L, -3);
lua_pushstring(L, "__gc");
lua_pushcfunction(L, &lua_tcpd_accept_gc);
lua_rawset(L, -3);
lua_pop(L, 1);
/* --- server metatable --- */
luaL_newmetatable(L, LUA_TCPD_SERVER_TYPE);
lua_pushstring(L, "close");
lua_pushcfunction(L, &lua_tcpd_server_close);
lua_rawset(L, -3);
lua_pushcfunction(L, &lua_tcpd_server_rebind);
lua_setfield(L, -2, "rebind");
lua_pushstring(L, "__gc");
lua_pushcfunction(L, &lua_tcpd_server_gc);
lua_rawset(L, -3);
lua_pushstring(L, "__tostring");
lua_pushcfunction(L, &lua_tcpd_server_tostring);
lua_rawset(L, -3);
lua_pushstring(L, "__index");
lua_pushvalue(L, -2);
lua_rawset(L, -3);
lua_pop(L, 1);
/* --- module function table --- */
lua_newtable(L);
luaL_register(L, "tcpd", tcpdlib);
return 1;
}
| luafan/luafan | src/tcpd.c | C | mit | 34,905 |
#include <stdint.h>
const uint8_t
#if defined __GNUC__
__attribute__((aligned(4)))
#elif defined _MSC_VER
__declspec(align(4))
#endif
mrblib_extman_irep[] = {
0x45,0x54,0x49,0x52,0x30,0x30,0x30,0x33,0x5a,0x89,0x00,0x00,0x44,0xcb,0x4d,0x41,
0x54,0x5a,0x30,0x30,0x30,0x30,0x49,0x52,0x45,0x50,0x00,0x00,0x32,0x2d,0x30,0x30,
0x30,0x30,0x00,0x00,0x02,0x36,0x00,0x01,0x00,0x06,0x00,0x11,0x00,0x00,0x00,0x39,
0x05,0x00,0x80,0x00,0x44,0x00,0x80,0x00,0x45,0x00,0x80,0x00,0x48,0x00,0x80,0x00,
0xc0,0x02,0x00,0x01,0x46,0x40,0x80,0x00,0x48,0x00,0x80,0x00,0xc0,0x04,0x00,0x01,
0x46,0x80,0x80,0x00,0x48,0x00,0x80,0x00,0xc0,0x06,0x00,0x01,0x46,0xc0,0x80,0x00,
0x48,0x00,0x80,0x00,0xc0,0x08,0x00,0x01,0x46,0x00,0x81,0x00,0x48,0x00,0x80,0x00,
0xc0,0x0a,0x00,0x01,0x46,0x40,0x81,0x00,0x48,0x00,0x80,0x00,0xc0,0x0c,0x00,0x01,
0x46,0x80,0x81,0x00,0x48,0x00,0x80,0x00,0xc0,0x0e,0x00,0x01,0x46,0xc0,0x81,0x00,
0x48,0x00,0x80,0x00,0xc0,0x10,0x00,0x01,0x46,0x00,0x82,0x00,0x48,0x00,0x80,0x00,
0xc0,0x12,0x00,0x01,0x46,0x40,0x82,0x00,0x48,0x00,0x80,0x00,0xc0,0x14,0x00,0x01,
0x46,0x80,0x82,0x00,0x48,0x00,0x80,0x00,0xc0,0x16,0x00,0x01,0x46,0xc0,0x82,0x00,
0x48,0x00,0x80,0x00,0xc0,0x18,0x00,0x01,0x46,0x00,0x83,0x00,0x48,0x00,0x80,0x00,
0xc0,0x1a,0x00,0x01,0x46,0x40,0x83,0x00,0x48,0x00,0x80,0x00,0xc0,0x1c,0x00,0x01,
0x46,0x80,0x83,0x00,0x48,0x00,0x80,0x00,0xc0,0x1e,0x00,0x01,0x46,0xc0,0x83,0x00,
0x11,0x00,0x80,0x00,0x20,0x00,0x84,0x00,0x11,0x00,0x80,0x00,0x3d,0x00,0x00,0x01,
0xbd,0x00,0x80,0x01,0x3d,0x01,0x00,0x02,0x40,0x21,0x80,0x02,0xa1,0x41,0x84,0x00,
0x4a,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x13,0x52,0x75,0x6e,0x20,0x61,
0x73,0x20,0x6d,0x72,0x75,0x62,0x79,0x20,0x73,0x63,0x72,0x69,0x70,0x74,0x00,0x00,
0x01,0x2a,0x00,0x00,0x0a,0x41,0x6c,0x74,0x2b,0x43,0x74,0x72,0x6c,0x2b,0x52,0x00,
0x00,0x00,0x12,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0f,0x6f,0x6e,0x5f,
0x6d,0x61,0x72,0x67,0x69,0x6e,0x5f,0x63,0x6c,0x69,0x63,0x6b,0x00,0x00,0x0f,0x6f,
0x6e,0x5f,0x64,0x6f,0x75,0x62,0x6c,0x65,0x5f,0x63,0x6c,0x69,0x63,0x6b,0x00,0x00,
0x12,0x6f,0x6e,0x5f,0x73,0x61,0x76,0x65,0x5f,0x70,0x6f,0x69,0x6e,0x74,0x5f,0x6c,
0x65,0x66,0x74,0x00,0x00,0x15,0x6f,0x6e,0x5f,0x73,0x61,0x76,0x65,0x5f,0x70,0x6f,
0x69,0x6e,0x74,0x5f,0x72,0x65,0x61,0x63,0x68,0x65,0x64,0x00,0x00,0x07,0x6f,0x6e,
0x5f,0x63,0x68,0x61,0x72,0x00,0x00,0x07,0x6f,0x6e,0x5f,0x73,0x61,0x76,0x65,0x00,
0x00,0x0e,0x6f,0x6e,0x5f,0x62,0x65,0x66,0x6f,0x72,0x65,0x5f,0x73,0x61,0x76,0x65,
0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x73,0x77,0x69,0x74,0x63,0x68,0x5f,0x66,0x69,0x6c,
0x65,0x00,0x00,0x07,0x6f,0x6e,0x5f,0x6f,0x70,0x65,0x6e,0x00,0x00,0x0c,0x6f,0x6e,
0x5f,0x75,0x70,0x64,0x61,0x74,0x65,0x5f,0x75,0x69,0x00,0x00,0x06,0x6f,0x6e,0x5f,
0x6b,0x65,0x79,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x64,0x77,0x65,0x6c,0x6c,0x5f,0x73,
0x74,0x61,0x72,0x74,0x00,0x00,0x08,0x6f,0x6e,0x5f,0x63,0x6c,0x6f,0x73,0x65,0x00,
0x00,0x16,0x6f,0x6e,0x5f,0x75,0x73,0x65,0x72,0x5f,0x6c,0x69,0x73,0x74,0x5f,0x73,
0x65,0x6c,0x65,0x63,0x74,0x69,0x6f,0x6e,0x00,0x00,0x08,0x6f,0x6e,0x5f,0x73,0x74,
0x72,0x69,0x70,0x00,0x00,0x0c,0x6c,0x6f,0x61,0x64,0x5f,0x73,0x63,0x72,0x69,0x70,
0x74,0x73,0x00,0x00,0x0e,0x64,0x65,0x66,0x69,0x6e,0x65,0x5f,0x63,0x6f,0x6d,0x6d,
0x61,0x6e,0x64,0x00,0x00,0x00,0x02,0xd2,0x00,0x01,0x00,0x03,0x00,0x05,0x00,0x00,
0x00,0x3d,0x00,0x00,0x83,0xff,0xbf,0x00,0x12,0x00,0x80,0x00,0x03,0x00,0xc0,0x00,
0x92,0x00,0x80,0x00,0x83,0x00,0xc0,0x00,0x12,0x01,0x80,0x00,0x03,0x01,0xc0,0x00,
0x92,0x01,0x80,0x00,0x83,0x01,0xc0,0x00,0x12,0x02,0x80,0x00,0x03,0x02,0xc0,0x00,
0x92,0x02,0x80,0x00,0x83,0x02,0xc0,0x00,0x12,0x03,0x80,0x00,0x03,0x03,0xc0,0x00,
0x92,0x03,0x80,0x00,0x83,0x03,0xc0,0x00,0x12,0x04,0x80,0x00,0x03,0x04,0xc0,0x00,
0x92,0x04,0x80,0x00,0x83,0x04,0xc0,0x00,0x12,0x05,0x80,0x00,0x03,0x05,0xc0,0x00,
0x92,0x05,0x80,0x00,0x83,0x05,0xc0,0x00,0x12,0x06,0x80,0x00,0x03,0x06,0xc0,0x00,
0x92,0x06,0x80,0x00,0x83,0x06,0xc0,0x00,0x12,0x07,0x80,0x00,0x03,0x07,0xc0,0x00,
0x92,0x07,0x80,0x00,0x83,0x07,0xc0,0x00,0x12,0x08,0x80,0x00,0x03,0x08,0xc0,0x00,
0x92,0x08,0x80,0x00,0x37,0x40,0x80,0x00,0x0e,0x09,0x80,0x00,0x83,0x04,0xc0,0x00,
0x8e,0x09,0x80,0x00,0x3f,0x40,0x80,0x00,0x0e,0x0a,0x80,0x00,0x3f,0x40,0x80,0x00,
0x8e,0x0a,0x80,0x00,0x06,0x00,0x80,0x00,0x47,0x40,0x80,0x00,0x45,0x00,0x80,0x00,
0x05,0x00,0x80,0x00,0x05,0x00,0x00,0x01,0x43,0x80,0x85,0x00,0xc5,0x00,0x80,0x00,
0x06,0x00,0x80,0x00,0x40,0x05,0x00,0x01,0x21,0xc0,0x85,0x00,0x06,0x00,0x80,0x00,
0x40,0x07,0x00,0x01,0x21,0x00,0x86,0x00,0x06,0x00,0x80,0x00,0x40,0x09,0x00,0x01,
0x21,0x40,0x86,0x00,0x29,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1a,
0x00,0x12,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4d,0x41,0x52,0x47,0x49,0x4e,0x5f,0x43,
0x4c,0x49,0x43,0x4b,0x00,0x00,0x12,0x45,0x56,0x45,0x4e,0x54,0x5f,0x44,0x4f,0x55,
0x42,0x4c,0x45,0x5f,0x43,0x4c,0x49,0x43,0x4b,0x00,0x00,0x15,0x45,0x56,0x45,0x4e,
0x54,0x5f,0x53,0x41,0x56,0x45,0x5f,0x50,0x4f,0x49,0x4e,0x54,0x5f,0x4c,0x45,0x46,
0x54,0x00,0x00,0x18,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,0x5f,0x50,
0x4f,0x49,0x4e,0x54,0x5f,0x52,0x45,0x41,0x43,0x48,0x45,0x44,0x00,0x00,0x0a,0x45,
0x56,0x45,0x4e,0x54,0x5f,0x43,0x48,0x41,0x52,0x00,0x00,0x0a,0x45,0x56,0x45,0x4e,
0x54,0x5f,0x53,0x41,0x56,0x45,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x42,
0x45,0x46,0x4f,0x52,0x45,0x5f,0x53,0x41,0x56,0x45,0x00,0x00,0x11,0x45,0x56,0x45,
0x4e,0x54,0x5f,0x53,0x57,0x49,0x54,0x43,0x48,0x5f,0x46,0x49,0x4c,0x45,0x00,0x00,
0x0a,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x50,0x45,0x4e,0x00,0x00,0x0f,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x55,0x50,0x44,0x41,0x54,0x45,0x5f,0x55,0x49,0x00,0x00,0x09,
0x45,0x56,0x45,0x4e,0x54,0x5f,0x4b,0x45,0x59,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,
0x54,0x5f,0x44,0x57,0x45,0x4c,0x4c,0x5f,0x53,0x54,0x41,0x52,0x54,0x00,0x00,0x0b,
0x45,0x56,0x45,0x4e,0x54,0x5f,0x43,0x4c,0x4f,0x53,0x45,0x00,0x00,0x11,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x45,0x44,0x49,0x54,0x4f,0x52,0x5f,0x4c,0x49,0x4e,0x45,0x00,
0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4c,
0x49,0x4e,0x45,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x50,0x45,0x4e,
0x5f,0x53,0x57,0x49,0x54,0x43,0x48,0x00,0x00,0x19,0x45,0x56,0x45,0x4e,0x54,0x5f,
0x55,0x53,0x45,0x52,0x5f,0x4c,0x49,0x53,0x54,0x5f,0x53,0x45,0x4c,0x45,0x43,0x54,
0x49,0x4f,0x4e,0x00,0x00,0x0b,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x54,0x52,0x49,
0x50,0x00,0x00,0x0f,0x40,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,
0x65,0x72,0x73,0x00,0x00,0x09,0x40,0x6d,0x65,0x6e,0x75,0x5f,0x69,0x64,0x78,0x00,
0x00,0x09,0x40,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x73,0x00,0x00,0x0f,0x40,0x73,
0x68,0x6f,0x72,0x74,0x63,0x75,0x74,0x73,0x5f,0x75,0x73,0x65,0x64,0x00,0x00,0x0e,
0x53,0x74,0x79,0x6c,0x69,0x6e,0x67,0x43,0x6f,0x6e,0x74,0x65,0x78,0x74,0x00,0x00,
0x07,0x6f,0x6e,0x5f,0x6f,0x70,0x65,0x6e,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x73,0x77,
0x69,0x74,0x63,0x68,0x5f,0x66,0x69,0x6c,0x65,0x00,0x00,0x07,0x6f,0x6e,0x5f,0x63,
0x68,0x61,0x72,0x00,0x00,0x00,0x07,0x2e,0x00,0x01,0x00,0x05,0x00,0x1d,0x00,0x00,
0x00,0xcb,0x00,0x00,0x48,0x00,0x80,0x00,0xc0,0x00,0x00,0x01,0x46,0x00,0x80,0x00,
0x48,0x00,0x80,0x00,0xc0,0x02,0x00,0x01,0x46,0x40,0x80,0x00,0x48,0x00,0x80,0x00,
0xc0,0x04,0x00,0x01,0x46,0x80,0x80,0x00,0x48,0x00,0x80,0x00,0xc0,0x06,0x00,0x01,
0x46,0xc0,0x80,0x00,0x48,0x00,0x80,0x00,0xc0,0x08,0x00,0x01,0x46,0x00,0x81,0x00,
0x48,0x00,0x80,0x00,0xc0,0x0a,0x00,0x01,0x46,0x40,0x81,0x00,0x48,0x00,0x80,0x00,
0xc0,0x0c,0x00,0x01,0x46,0x80,0x81,0x00,0x48,0x00,0x80,0x00,0xc0,0x0e,0x00,0x01,
0x46,0xc0,0x81,0x00,0x48,0x00,0x80,0x00,0xc0,0x10,0x00,0x01,0x46,0x00,0x82,0x00,
0x48,0x00,0x80,0x00,0xc0,0x12,0x00,0x01,0x46,0x40,0x82,0x00,0x48,0x00,0x80,0x00,
0xc0,0x14,0x00,0x01,0x46,0x80,0x82,0x00,0x48,0x00,0x80,0x00,0xc0,0x16,0x00,0x01,
0x46,0xc0,0x82,0x00,0x48,0x00,0x80,0x00,0xc0,0x18,0x00,0x01,0x46,0x00,0x83,0x00,
0x48,0x00,0x80,0x00,0xc0,0x1a,0x00,0x01,0x46,0x40,0x83,0x00,0x48,0x00,0x80,0x00,
0xc0,0x1c,0x00,0x01,0x46,0x80,0x83,0x00,0x48,0x00,0x80,0x00,0xc0,0x1e,0x00,0x01,
0x46,0xc0,0x83,0x00,0x48,0x00,0x80,0x00,0xc0,0x20,0x00,0x01,0x46,0x00,0x84,0x00,
0x48,0x00,0x80,0x00,0xc0,0x22,0x00,0x01,0x46,0x40,0x84,0x00,0x48,0x00,0x80,0x00,
0xc0,0x24,0x00,0x01,0x46,0x80,0x84,0x00,0x48,0x00,0x80,0x00,0xc0,0x26,0x00,0x01,
0x46,0xc0,0x84,0x00,0x48,0x00,0x80,0x00,0xc0,0x28,0x00,0x01,0x46,0x00,0x85,0x00,
0x48,0x00,0x80,0x00,0xc0,0x2a,0x00,0x01,0x46,0x40,0x85,0x00,0x48,0x00,0x80,0x00,
0xc0,0x2c,0x00,0x01,0x46,0x80,0x85,0x00,0x48,0x00,0x80,0x00,0xc0,0x2e,0x00,0x01,
0x46,0xc0,0x85,0x00,0x48,0x00,0x80,0x00,0xc0,0x30,0x00,0x01,0x46,0x00,0x86,0x00,
0x48,0x00,0x80,0x00,0xc0,0x32,0x00,0x01,0x46,0x40,0x86,0x00,0x48,0x00,0x80,0x00,
0xc0,0x34,0x00,0x01,0x46,0x80,0x86,0x00,0x48,0x00,0x80,0x00,0xc0,0x36,0x00,0x01,
0x46,0xc0,0x86,0x00,0x48,0x00,0x80,0x00,0xc0,0x38,0x00,0x01,0x46,0x00,0x87,0x00,
0x06,0x00,0x80,0x00,0x04,0x0f,0x00,0x01,0xa0,0x40,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x10,0x00,0x01,0x84,0x10,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x11,0x00,0x01,0x84,0x11,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x12,0x00,0x01,0x84,0x12,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x13,0x00,0x01,0x84,0x13,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x14,0x00,0x01,0x84,0x14,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x15,0x00,0x01,0x04,0x0b,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x15,0x00,0x01,0x04,0x16,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x16,0x00,0x01,0x04,0x17,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x17,0x00,0x01,0x04,0x18,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x18,0x00,0x01,0x84,0x0c,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x19,0x00,0x01,0x04,0x0c,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x19,0x00,0x01,0x84,0x0b,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x1a,0x00,0x01,0x04,0x03,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x1a,0x00,0x01,0x84,0x03,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x1b,0x00,0x01,0x04,0x04,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x1b,0x00,0x01,0x84,0x04,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x1c,0x00,0x01,0x04,0x05,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x1c,0x00,0x01,0x84,0x05,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x1d,0x00,0x01,0x04,0x06,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x1d,0x00,0x01,0x84,0x06,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x1e,0x00,0x01,0x04,0x07,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x1e,0x00,0x01,0x84,0x07,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x1f,0x00,0x01,0x04,0x08,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x1f,0x00,0x01,0x84,0x08,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x20,0x00,0x01,0x04,0x09,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x20,0x00,0x01,0x84,0x09,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x04,0x21,0x00,0x01,0x04,0x0a,0x80,0x01,0x20,0xc1,0x87,0x00,0x06,0x00,0x80,0x00,
0x84,0x21,0x00,0x01,0x84,0x0a,0x80,0x01,0x20,0xc1,0x87,0x00,0x29,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x44,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,
0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0c,0x63,0x61,0x6c,0x6c,0x5f,0x63,0x6f,
0x6d,0x6d,0x61,0x6e,0x64,0x00,0x00,0x0e,0x64,0x65,0x66,0x69,0x6e,0x65,0x5f,0x63,
0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x00,0x00,0x05,0x73,0x65,0x6e,0x64,0x32,0x00,0x00,
0x07,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x00,0x00,0x11,0x61,0x64,0x64,0x5f,0x65,
0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,0x00,0x0f,0x6f,
0x6e,0x5f,0x6d,0x61,0x72,0x67,0x69,0x6e,0x5f,0x63,0x6c,0x69,0x63,0x6b,0x00,0x00,
0x0f,0x6f,0x6e,0x5f,0x64,0x6f,0x75,0x62,0x6c,0x65,0x5f,0x63,0x6c,0x69,0x63,0x6b,
0x00,0x00,0x12,0x6f,0x6e,0x5f,0x73,0x61,0x76,0x65,0x5f,0x70,0x6f,0x69,0x6e,0x74,
0x5f,0x6c,0x65,0x66,0x74,0x00,0x00,0x15,0x6f,0x6e,0x5f,0x73,0x61,0x76,0x65,0x5f,
0x70,0x6f,0x69,0x6e,0x74,0x5f,0x72,0x65,0x61,0x63,0x68,0x65,0x64,0x00,0x00,0x07,
0x6f,0x6e,0x5f,0x6f,0x70,0x65,0x6e,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x73,0x77,0x69,
0x74,0x63,0x68,0x5f,0x66,0x69,0x6c,0x65,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x73,0x61,
0x76,0x65,0x5f,0x62,0x65,0x66,0x6f,0x72,0x65,0x00,0x00,0x07,0x6f,0x6e,0x5f,0x73,
0x61,0x76,0x65,0x00,0x00,0x0c,0x6f,0x6e,0x5f,0x75,0x70,0x64,0x61,0x74,0x65,0x5f,
0x75,0x69,0x00,0x00,0x07,0x6f,0x6e,0x5f,0x63,0x68,0x61,0x72,0x00,0x00,0x06,0x6f,
0x6e,0x5f,0x6b,0x65,0x79,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x64,0x77,0x65,0x6c,0x6c,
0x5f,0x73,0x74,0x61,0x72,0x74,0x00,0x00,0x08,0x6f,0x6e,0x5f,0x63,0x6c,0x6f,0x73,
0x65,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x6f,0x70,0x65,0x6e,0x5f,0x73,0x77,0x69,0x74,
0x63,0x68,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x65,0x64,0x69,0x74,0x6f,0x72,0x5f,0x6c,
0x69,0x6e,0x65,0x00,0x00,0x0e,0x6f,0x6e,0x5f,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,
0x6c,0x69,0x6e,0x65,0x00,0x00,0x0a,0x73,0x74,0x72,0x69,0x70,0x5f,0x73,0x68,0x6f,
0x77,0x00,0x00,0x0e,0x75,0x73,0x65,0x72,0x5f,0x6c,0x69,0x73,0x74,0x5f,0x73,0x68,
0x6f,0x77,0x00,0x00,0x0c,0x63,0x75,0x72,0x72,0x65,0x6e,0x74,0x5f,0x66,0x69,0x6c,
0x65,0x00,0x00,0x0c,0x6c,0x6f,0x61,0x64,0x5f,0x73,0x63,0x72,0x69,0x70,0x74,0x73,
0x00,0x00,0x10,0x6f,0x6e,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x73,0x77,0x69,
0x74,0x63,0x68,0x00,0x00,0x0e,0x67,0x72,0x61,0x62,0x5f,0x6c,0x69,0x6e,0x65,0x5f,
0x66,0x72,0x6f,0x6d,0x00,0x00,0x0c,0x6f,0x6e,0x5f,0x6c,0x69,0x6e,0x65,0x5f,0x63,
0x68,0x61,0x72,0x00,0x00,0x0d,0x61,0x74,0x74,0x72,0x5f,0x61,0x63,0x63,0x65,0x73,
0x73,0x6f,0x72,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x73,0x00,0x00,0x0c,0x61,0x6c,0x69,0x61,0x73,0x5f,0x6d,0x65,0x74,
0x68,0x6f,0x64,0x00,0x00,0x0a,0x73,0x65,0x6e,0x64,0x45,0x64,0x69,0x74,0x6f,0x72,
0x00,0x00,0x0b,0x73,0x65,0x6e,0x64,0x5f,0x65,0x64,0x69,0x74,0x6f,0x72,0x00,0x00,
0x0a,0x73,0x65,0x6e,0x64,0x4f,0x75,0x74,0x70,0x75,0x74,0x00,0x00,0x0b,0x73,0x65,
0x6e,0x64,0x5f,0x6f,0x75,0x74,0x70,0x75,0x74,0x00,0x00,0x0c,0x63,0x6f,0x6e,0x73,
0x74,0x61,0x6e,0x74,0x4e,0x61,0x6d,0x65,0x00,0x00,0x0d,0x63,0x6f,0x6e,0x73,0x74,
0x61,0x6e,0x74,0x5f,0x6e,0x61,0x6d,0x65,0x00,0x00,0x0b,0x6d,0x65,0x6e,0x75,0x43,
0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x00,0x00,0x0c,0x6d,0x65,0x6e,0x75,0x5f,0x63,0x6f,
0x6d,0x6d,0x61,0x6e,0x64,0x00,0x00,0x0f,0x75,0x70,0x64,0x61,0x74,0x65,0x53,0x74,
0x61,0x74,0x75,0x73,0x42,0x61,0x72,0x00,0x00,0x11,0x75,0x70,0x64,0x61,0x74,0x65,
0x5f,0x73,0x74,0x61,0x74,0x75,0x73,0x5f,0x62,0x61,0x72,0x00,0x00,0x09,0x73,0x74,
0x72,0x69,0x70,0x53,0x68,0x6f,0x77,0x00,0x00,0x08,0x73,0x74,0x72,0x69,0x70,0x53,
0x65,0x74,0x00,0x00,0x09,0x73,0x74,0x72,0x69,0x70,0x5f,0x73,0x65,0x74,0x00,0x00,
0x0c,0x73,0x74,0x72,0x69,0x70,0x53,0x65,0x74,0x4c,0x69,0x73,0x74,0x00,0x00,0x0e,
0x73,0x74,0x72,0x69,0x70,0x5f,0x73,0x65,0x74,0x5f,0x6c,0x69,0x73,0x74,0x00,0x00,
0x0a,0x73,0x74,0x72,0x69,0x70,0x56,0x61,0x6c,0x75,0x65,0x00,0x00,0x0b,0x73,0x74,
0x72,0x69,0x70,0x5f,0x76,0x61,0x6c,0x75,0x65,0x00,0x00,0x0b,0x6c,0x6f,0x61,0x64,
0x53,0x63,0x72,0x69,0x70,0x74,0x73,0x00,0x00,0x0b,0x63,0x75,0x72,0x72,0x65,0x6e,
0x74,0x46,0x69,0x6c,0x65,0x00,0x00,0x0c,0x75,0x73,0x65,0x72,0x4c,0x69,0x73,0x74,
0x53,0x68,0x6f,0x77,0x00,0x00,0x0d,0x6f,0x6e,0x4d,0x61,0x72,0x67,0x69,0x6e,0x43,
0x6c,0x69,0x63,0x6b,0x00,0x00,0x0d,0x6f,0x6e,0x44,0x6f,0x75,0x62,0x6c,0x65,0x43,
0x6c,0x69,0x63,0x6b,0x00,0x00,0x0f,0x6f,0x6e,0x53,0x61,0x76,0x65,0x50,0x6f,0x69,
0x6e,0x74,0x4c,0x65,0x66,0x74,0x00,0x00,0x12,0x6f,0x6e,0x53,0x61,0x76,0x65,0x50,
0x6f,0x69,0x6e,0x74,0x52,0x65,0x61,0x63,0x68,0x65,0x64,0x00,0x00,0x06,0x6f,0x6e,
0x4f,0x70,0x65,0x6e,0x00,0x00,0x0c,0x6f,0x6e,0x53,0x77,0x69,0x74,0x63,0x68,0x46,
0x69,0x6c,0x65,0x00,0x00,0x0c,0x6f,0x6e,0x53,0x61,0x76,0x65,0x42,0x65,0x66,0x6f,
0x72,0x65,0x00,0x00,0x06,0x6f,0x6e,0x53,0x61,0x76,0x65,0x00,0x00,0x0a,0x6f,0x6e,
0x55,0x70,0x64,0x61,0x74,0x65,0x55,0x49,0x00,0x00,0x06,0x6f,0x6e,0x43,0x68,0x61,
0x72,0x00,0x00,0x05,0x6f,0x6e,0x4b,0x65,0x79,0x00,0x00,0x0c,0x6f,0x6e,0x44,0x77,
0x65,0x6c,0x6c,0x53,0x74,0x61,0x72,0x74,0x00,0x00,0x07,0x6f,0x6e,0x43,0x6c,0x6f,
0x73,0x65,0x00,0x00,0x0c,0x6f,0x6e,0x4f,0x70,0x65,0x6e,0x53,0x77,0x69,0x74,0x63,
0x68,0x00,0x00,0x0c,0x6f,0x6e,0x45,0x64,0x69,0x74,0x6f,0x72,0x4c,0x69,0x6e,0x65,
0x00,0x00,0x0c,0x6f,0x6e,0x4f,0x75,0x74,0x70,0x75,0x74,0x4c,0x69,0x6e,0x65,0x00,
0x00,0x00,0x00,0x43,0x00,0x05,0x00,0x07,0x00,0x01,0x00,0x00,0x00,0x07,0x00,0x00,
0x26,0x00,0x08,0x02,0x08,0x00,0x00,0x02,0x99,0x01,0xc0,0x00,0x01,0x40,0x80,0x02,
0x40,0x01,0x00,0x03,0x21,0x00,0x80,0x02,0x29,0x00,0x00,0x02,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x01,0x00,0x0a,0x65,0x61,0x63,0x68,0x5f,0x69,0x6e,0x64,0x65,0x78,
0x00,0x00,0x00,0x00,0xab,0x00,0x03,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x1a,0x00,
0x26,0x00,0x00,0x02,0x15,0x40,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x00,0x80,0x01,
0x84,0x00,0x00,0x02,0xa0,0x00,0x80,0x01,0x37,0x00,0x01,0x02,0x15,0x80,0x80,0x02,
0x38,0x40,0x01,0x02,0xa0,0xbf,0x80,0x01,0x16,0x00,0x81,0x01,0x15,0x40,0x80,0x01,
0x01,0x40,0x00,0x02,0xa0,0x00,0x80,0x01,0x84,0x01,0x00,0x02,0xa0,0x00,0x80,0x01,
0x99,0x01,0xc0,0x01,0x15,0x40,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x00,0x81,0x01,
0x15,0x00,0x81,0x01,0x19,0x01,0xc0,0x01,0x29,0x40,0x80,0x01,0x97,0x00,0x40,0x00,
0x05,0x00,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,
0x00,0x02,0x5b,0x5d,0x00,0x00,0x05,0x62,0x6c,0x6f,0x63,0x6b,0x00,0x00,0x04,0x63,
0x61,0x6c,0x6c,0x00,0x00,0x06,0x72,0x65,0x6d,0x6f,0x76,0x65,0x00,0x00,0x09,0x64,
0x65,0x6c,0x65,0x74,0x65,0x5f,0x61,0x74,0x00,0x00,0x00,0x00,0x5a,0x00,0x04,0x00,
0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x26,0x00,0x10,0x02,0x97,0x00,0x40,0x00,
0x97,0x00,0x40,0x00,0x05,0x00,0x00,0x01,0x0d,0x00,0x00,0x02,0x01,0x40,0x80,0x02,
0xa0,0x40,0x00,0x02,0x01,0x80,0x80,0x02,0xa0,0x80,0x00,0x02,0x29,0x00,0x00,0x02,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x09,0x40,0x63,0x6f,0x6d,0x6d,0x61,
0x6e,0x64,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x04,0x63,0x61,0x6c,0x6c,0x00,
0x00,0x00,0x02,0xe7,0x00,0x0a,0x00,0x0f,0x00,0x01,0x00,0x00,0x00,0x6b,0x00,0x00,
0xa6,0x00,0x30,0x02,0x97,0x01,0x40,0x00,0x97,0x01,0x40,0x00,0x97,0x01,0x40,0x00,
0x97,0x01,0x40,0x00,0x3d,0x00,0x00,0x01,0x05,0x00,0x80,0x01,0x05,0x00,0x00,0x02,
0x0d,0x00,0x00,0x03,0x83,0x04,0x40,0x05,0x0d,0x00,0x80,0x05,0x41,0x80,0x02,0x05,
0x40,0x01,0x80,0x05,0x21,0x40,0x00,0x05,0xbd,0x00,0x00,0x05,0x01,0x80,0x81,0x05,
0x3e,0xc0,0x02,0x05,0x3d,0x01,0x80,0x05,0x3e,0xc0,0x02,0x05,0x01,0x80,0x80,0x05,
0x3e,0xc0,0x02,0x05,0x01,0x80,0x02,0x04,0x19,0x0e,0xc0,0x01,0x0d,0x01,0x00,0x05,
0x01,0xc0,0x80,0x05,0xa0,0xc0,0x00,0x05,0x01,0x80,0x82,0x04,0x99,0x01,0x40,0x05,
0x01,0x40,0x02,0x05,0x01,0x40,0x80,0x05,0xa0,0x00,0x01,0x05,0x99,0x03,0x40,0x05,
0x06,0x00,0x00,0x05,0xbd,0x01,0x80,0x05,0x01,0x40,0x02,0x06,0x3e,0x00,0x83,0x05,
0x3d,0x02,0x00,0x06,0x3e,0x00,0x83,0x05,0xa0,0x40,0x01,0x05,0x01,0xc0,0x00,0x05,
0x11,0x03,0x80,0x05,0xbd,0x02,0x00,0x06,0x01,0x00,0x82,0x06,0x3e,0x40,0x03,0x06,
0x01,0x80,0x82,0x06,0x20,0xc1,0x81,0x05,0x01,0x40,0x00,0x05,0x0d,0x01,0x80,0x05,
0x01,0xc0,0x00,0x06,0x01,0x80,0x82,0x06,0x20,0xc1,0x81,0x05,0x01,0x40,0x00,0x05,
0x11,0x03,0x80,0x05,0x3d,0x03,0x00,0x06,0x01,0x00,0x82,0x06,0x3e,0x40,0x03,0x06,
0x01,0x80,0x82,0x06,0x20,0xc1,0x81,0x05,0xbd,0x03,0x00,0x05,0x01,0x40,0x80,0x05,
0x3e,0xc0,0x02,0x05,0x3d,0x04,0x80,0x05,0x3e,0xc0,0x02,0x05,0x19,0x03,0x40,0x02,
0xbd,0x04,0x80,0x05,0x01,0x00,0x01,0x06,0x3e,0x00,0x83,0x05,0x3d,0x04,0x00,0x06,
0x3e,0x00,0x83,0x05,0x97,0x00,0x40,0x00,0xbd,0x00,0x80,0x05,0xac,0x00,0x02,0x05,
0x11,0x03,0x80,0x05,0x3d,0x05,0x00,0x06,0x01,0x00,0x82,0x06,0x3e,0x40,0x03,0x06,
0x01,0x80,0x82,0x06,0x20,0xc1,0x81,0x05,0xbd,0x05,0x00,0x05,0x11,0x03,0x80,0x05,
0x3d,0x06,0x00,0x06,0x01,0x00,0x82,0x06,0x3e,0x40,0x03,0x06,0x01,0x80,0x82,0x06,
0x20,0xc1,0x81,0x05,0xbd,0x06,0x00,0x05,0x11,0x03,0x80,0x05,0x3d,0x07,0x00,0x06,
0x01,0x00,0x82,0x06,0x3e,0x40,0x03,0x06,0x01,0x80,0x82,0x06,0x20,0xc1,0x81,0x05,
0x01,0x40,0x01,0x05,0x8d,0x04,0x80,0x05,0x01,0x40,0x00,0x06,0x01,0x80,0x82,0x06,
0x20,0xc1,0x81,0x05,0x01,0x80,0x01,0x05,0x0d,0x00,0x80,0x05,0xb2,0x80,0x02,0x05,
0x19,0x02,0x40,0x05,0x0d,0x00,0x00,0x05,0xad,0x00,0x02,0x05,0x0e,0x00,0x00,0x05,
0x97,0x00,0x40,0x00,0x05,0x00,0x00,0x05,0x29,0x00,0x00,0x05,0x00,0x00,0x00,0x0f,
0x00,0x00,0x01,0x2a,0x00,0x00,0x00,0x00,0x00,0x01,0x2e,0x00,0x00,0x21,0x45,0x72,
0x72,0x6f,0x72,0x3a,0x20,0x73,0x68,0x6f,0x72,0x74,0x63,0x75,0x74,0x20,0x61,0x6c,
0x72,0x65,0x61,0x64,0x79,0x20,0x75,0x73,0x65,0x64,0x20,0x69,0x6e,0x20,0x22,0x00,
0x00,0x01,0x22,0x00,0x00,0x11,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x2e,0x73,0x68,
0x6f,0x72,0x74,0x63,0x75,0x74,0x2e,0x00,0x00,0x0d,0x63,0x6f,0x6d,0x6d,0x61,0x6e,
0x64,0x2e,0x6e,0x61,0x6d,0x65,0x2e,0x00,0x00,0x1f,0x6d,0x72,0x75,0x62,0x79,0x3a,
0x65,0x76,0x61,0x6c,0x20,0x53,0x63,0x69,0x54,0x45,0x2e,0x63,0x61,0x6c,0x6c,0x5f,
0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x20,0x27,0x00,0x00,0x01,0x27,0x00,0x00,0x03,
0x2c,0x20,0x27,0x00,0x00,0x08,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x2e,0x00,0x00,
0x01,0x33,0x00,0x00,0x12,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x2e,0x73,0x75,0x62,
0x73,0x79,0x73,0x74,0x65,0x6d,0x2e,0x00,0x00,0x0d,0x73,0x61,0x76,0x65,0x62,0x65,
0x66,0x6f,0x72,0x65,0x3a,0x6e,0x6f,0x00,0x00,0x0d,0x63,0x6f,0x6d,0x6d,0x61,0x6e,
0x64,0x2e,0x6d,0x6f,0x64,0x65,0x2e,0x00,0x00,0x00,0x0b,0x00,0x09,0x40,0x6d,0x65,
0x6e,0x75,0x5f,0x69,0x64,0x78,0x00,0x00,0x04,0x65,0x61,0x63,0x68,0x00,0x00,0x0f,
0x40,0x73,0x68,0x6f,0x72,0x74,0x63,0x75,0x74,0x73,0x5f,0x75,0x73,0x65,0x64,0x00,
0x00,0x02,0x5b,0x5d,0x00,0x00,0x02,0x21,0x3d,0x00,0x00,0x05,0x72,0x61,0x69,0x73,
0x65,0x00,0x00,0x05,0x50,0x72,0x6f,0x70,0x73,0x00,0x00,0x03,0x5b,0x5d,0x3d,0x00,
0x00,0x01,0x2b,0x00,0x00,0x09,0x40,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x73,0x00,
0x00,0x02,0x3d,0x3d,0x00,0x00,0x00,0x00,0x8f,0x00,0x01,0x00,0x05,0x00,0x00,0x00,
0x00,0x00,0x13,0x00,0x26,0x00,0x00,0x02,0x16,0xc0,0x81,0x00,0x11,0x00,0x00,0x01,
0x3d,0x00,0x80,0x01,0x15,0xc0,0x01,0x02,0x3e,0x00,0x81,0x01,0xbd,0x00,0x00,0x02,
0x3e,0x00,0x81,0x01,0x15,0x80,0x00,0x02,0x3e,0x00,0x81,0x01,0xa0,0x40,0x00,0x01,
0x15,0x40,0x80,0x01,0xb2,0x80,0x00,0x01,0x99,0x01,0x40,0x01,0x15,0xc0,0x01,0x01,
0x16,0x80,0x01,0x01,0x97,0x00,0x40,0x00,0x05,0x00,0x00,0x01,0x29,0x00,0x00,0x01,
0x00,0x00,0x00,0x03,0x00,0x00,0x0d,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x2e,0x6e,
0x61,0x6d,0x65,0x2e,0x00,0x00,0x01,0x2e,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,
0x05,0x50,0x72,0x6f,0x70,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x02,0x3d,0x3d,
0x00,0x00,0x00,0x00,0xaf,0x00,0x06,0x00,0x0a,0x00,0x01,0x00,0x00,0x00,0x1a,0x00,
0x26,0x00,0x00,0x04,0x11,0x00,0x00,0x02,0x01,0x40,0x00,0x03,0x3d,0x00,0x80,0x03,
0xa0,0x40,0x00,0x03,0x01,0x80,0x81,0x02,0x83,0xff,0xbf,0x03,0x83,0xfe,0x3f,0x04,
0x41,0xc0,0x81,0x03,0xa0,0x80,0x00,0x03,0x40,0x01,0x80,0x03,0x21,0xc0,0x00,0x03,
0x99,0x03,0x40,0x01,0x01,0x00,0x01,0x03,0x01,0x40,0x81,0x03,0x03,0xff,0x3f,0x04,
0xa0,0x80,0x80,0x03,0x01,0x80,0x00,0x04,0x20,0x01,0x01,0x03,0x97,0x02,0x40,0x00,
0x01,0x00,0x01,0x03,0x01,0x40,0x81,0x03,0x03,0xff,0x3f,0x04,0xa0,0x80,0x80,0x03,
0xa0,0x00,0x01,0x03,0x29,0x00,0x00,0x03,0x00,0x00,0x00,0x01,0x00,0x00,0x02,0x3a,
0x3a,0x00,0x00,0x00,0x05,0x00,0x06,0x4f,0x62,0x6a,0x65,0x63,0x74,0x00,0x00,0x05,
0x73,0x70,0x6c,0x69,0x74,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x04,0x65,0x61,0x63,
0x68,0x00,0x00,0x08,0x5f,0x5f,0x73,0x65,0x6e,0x64,0x5f,0x5f,0x00,0x00,0x00,0x00,
0x3e,0x00,0x03,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x26,0x00,0x00,0x02,
0x15,0x00,0x81,0x01,0x01,0x40,0x00,0x02,0xa0,0x00,0x80,0x01,0x16,0x00,0x81,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x09,0x63,0x6f,
0x6e,0x73,0x74,0x5f,0x67,0x65,0x74,0x00,0x00,0x00,0x00,0x61,0x00,0x03,0x00,0x06,
0x00,0x01,0x00,0x00,0x00,0x0b,0x00,0x00,0x26,0x00,0x00,0x02,0x01,0x40,0x80,0x01,
0x91,0x00,0x00,0x02,0xa0,0x00,0x80,0x01,0x19,0x01,0xc0,0x01,0x01,0x40,0x80,0x01,
0xb7,0xc0,0x80,0x00,0x01,0x40,0x80,0x01,0x40,0x01,0x00,0x02,0x21,0x80,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x08,0x6b,0x69,
0x6e,0x64,0x5f,0x6f,0x66,0x3f,0x00,0x00,0x06,0x53,0x74,0x72,0x69,0x6e,0x67,0x00,
0x00,0x04,0x65,0x61,0x63,0x68,0x00,0x00,0x00,0x00,0x9c,0x00,0x07,0x00,0x0e,0x00,
0x01,0x00,0x00,0x00,0x17,0x00,0x00,0x00,0x26,0x00,0x00,0x02,0x01,0x40,0x80,0x03,
0x3d,0x00,0x00,0x04,0xa0,0x00,0x80,0x03,0x3a,0xc0,0x81,0x01,0xba,0xc0,0x01,0x02,
0x3a,0xc1,0x81,0x02,0xba,0xc1,0x01,0x03,0x99,0x00,0x40,0x03,0x17,0x01,0x40,0x00,
0x01,0x40,0x01,0x03,0xbd,0x00,0x80,0x02,0x06,0x00,0x80,0x03,0x01,0xc0,0x00,0x04,
0x01,0x40,0x81,0x04,0x01,0x80,0x01,0x05,0x01,0x00,0x81,0x05,0x20,0x00,0x80,0x05,
0x03,0x00,0x40,0x06,0xa0,0x80,0x80,0x05,0x40,0x01,0x00,0x06,0x21,0x42,0x80,0x03,
0x29,0x00,0x80,0x03,0x00,0x00,0x00,0x02,0x00,0x00,0x01,0x7c,0x00,0x00,0x01,0x2a,
0x00,0x00,0x00,0x03,0x00,0x05,0x73,0x70,0x6c,0x69,0x74,0x00,0x00,0x0e,0x64,0x65,
0x66,0x69,0x6e,0x65,0x5f,0x63,0x6f,0x6d,0x6d,0x61,0x6e,0x64,0x00,0x00,0x02,0x5b,
0x5d,0x00,0x00,0x00,0x00,0x53,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x09,
0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,0x15,0x00,0x01,0x02,0x20,0x40,0x00,0x02,
0x83,0xff,0xbf,0x02,0xa0,0x80,0x00,0x02,0x01,0x40,0x80,0x02,0x20,0x01,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x05,0x73,0x65,
0x6e,0x64,0x32,0x00,0x00,0x05,0x73,0x70,0x6c,0x69,0x74,0x00,0x00,0x02,0x5b,0x5d,
0x00,0x00,0x00,0x00,0xc1,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x00,0x00,0x1b,0x00,
0xa6,0x00,0x10,0x02,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x00,0x01,
0x0d,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0xa0,0x40,0x00,0x02,0x98,0x02,0x40,0x02,
0x37,0x00,0x01,0x02,0x0d,0x00,0x80,0x02,0x01,0x40,0x00,0x03,0x01,0x00,0x81,0x03,
0x20,0x81,0x80,0x02,0x0d,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0xa0,0x40,0x00,0x02,
0x04,0x02,0x80,0x02,0x01,0xc0,0x00,0x03,0x84,0x02,0x80,0x03,0x01,0x80,0x00,0x04,
0x3f,0x41,0x81,0x02,0xa0,0xc0,0x00,0x02,0x0d,0x00,0x00,0x02,0x01,0x40,0x80,0x02,
0xa0,0x40,0x00,0x02,0x20,0x80,0x01,0x02,0x29,0x00,0x00,0x02,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x07,0x00,0x0f,0x40,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,
0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x03,0x5b,0x5d,0x3d,
0x00,0x00,0x02,0x3c,0x3c,0x00,0x00,0x05,0x62,0x6c,0x6f,0x63,0x6b,0x00,0x00,0x06,
0x72,0x65,0x6d,0x6f,0x76,0x65,0x00,0x00,0x05,0x75,0x6e,0x69,0x71,0x21,0x00,0x00,
0x00,0x00,0x6b,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,
0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,
0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x12,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4d,0x41,0x52,0x47,
0x49,0x4e,0x5f,0x43,0x4c,0x49,0x43,0x4b,0x00,0x00,0x00,0x00,0x6b,0x00,0x03,0x00,
0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,
0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,
0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,
0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,0x00,0x12,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x44,0x4f,0x55,0x42,0x4c,0x45,0x5f,0x43,0x4c,0x49,0x43,0x4b,
0x00,0x00,0x00,0x00,0x6e,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,
0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,
0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x15,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,
0x5f,0x50,0x4f,0x49,0x4e,0x54,0x5f,0x4c,0x45,0x46,0x54,0x00,0x00,0x00,0x00,0x71,
0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0xa6,0x00,0x10,0x00,
0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,
0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,
0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,
0x00,0x18,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,0x5f,0x50,0x4f,0x49,
0x4e,0x54,0x5f,0x52,0x45,0x41,0x43,0x48,0x45,0x44,0x00,0x00,0x00,0x00,0x63,0x00,
0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0xa6,0x00,0x10,0x00,
0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,
0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,
0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,
0x00,0x0a,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x50,0x45,0x4e,0x00,0x00,0x00,0x00,
0x6a,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0xa6,0x00,0x10,0x00,
0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,
0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,
0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,
0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x57,0x49,0x54,0x43,0x48,0x5f,0x46,
0x49,0x4c,0x45,0x00,0x00,0x00,0x00,0x6a,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,
0x00,0x0a,0x00,0x00,0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,
0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,
0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,
0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,
0x53,0x41,0x56,0x45,0x5f,0x42,0x45,0x46,0x4f,0x52,0x45,0x00,0x00,0x00,0x00,0x63,
0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0xa6,0x00,0x10,0x00,
0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,
0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,
0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,
0x00,0x0a,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,0x00,0x00,0x00,0x00,
0x68,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0xa6,0x00,0x10,0x00,
0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,
0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,
0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,
0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,
0x00,0x0f,0x45,0x56,0x45,0x4e,0x54,0x5f,0x55,0x50,0x44,0x41,0x54,0x45,0x5f,0x55,
0x49,0x00,0x00,0x00,0x00,0x63,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,
0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,
0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x0a,0x45,0x56,0x45,0x4e,0x54,0x5f,0x43,0x48,0x41,0x52,
0x00,0x00,0x00,0x00,0x62,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,
0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,
0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x09,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4b,0x45,0x59,0x00,
0x00,0x00,0x00,0x6a,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,
0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,
0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x44,0x57,0x45,0x4c,
0x4c,0x5f,0x53,0x54,0x41,0x52,0x54,0x00,0x00,0x00,0x00,0x64,0x00,0x03,0x00,0x07,
0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,
0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,
0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,
0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,0x00,0x0b,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x43,0x4c,0x4f,0x53,0x45,0x00,0x00,0x00,0x00,0x6a,0x00,0x03,
0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,
0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,
0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,
0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,0x00,0x11,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x4f,0x50,0x45,0x4e,0x5f,0x53,0x57,0x49,0x54,0x43,0x48,0x00,
0x00,0x00,0x00,0x6a,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,
0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,
0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x45,0x44,0x49,0x54,
0x4f,0x52,0x5f,0x4c,0x49,0x4e,0x45,0x00,0x00,0x00,0x00,0x6a,0x00,0x03,0x00,0x07,
0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0xa6,0x00,0x10,0x00,0x97,0x00,0x40,0x00,
0x97,0x00,0x40,0x00,0x08,0x00,0x80,0x00,0x06,0x00,0x80,0x01,0x91,0x00,0x00,0x02,
0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0x01,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,
0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x00,0x00,0x11,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4c,0x49,0x4e,0x45,0x00,
0x00,0x00,0x00,0xb0,0x00,0x03,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,
0xa6,0x00,0x00,0x02,0x37,0xc0,0x80,0x01,0x0d,0x00,0x00,0x02,0x11,0x01,0x80,0x02,
0x01,0xc0,0x00,0x03,0x20,0x41,0x00,0x02,0x99,0x02,0x40,0x01,0x06,0x00,0x80,0x01,
0x11,0x01,0x00,0x02,0x08,0x00,0x80,0x02,0x01,0x80,0x00,0x03,0x21,0xc1,0x80,0x01,
0x11,0x02,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x40,0x81,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x0f,0x40,0x65,0x76,0x65,0x6e,0x74,
0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x03,0x5b,0x5d,0x3d,0x00,
0x00,0x0b,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x54,0x52,0x49,0x50,0x00,0x00,0x11,
0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,
0x72,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x11,0x73,0x74,0x72,0x69,
0x70,0x5f,0x73,0x68,0x6f,0x77,0x5f,0x69,0x6e,0x74,0x65,0x72,0x6e,0x00,0x00,0x00,
0x01,0x59,0x00,0x08,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x2e,0xa6,0x00,0x20,0x02,
0x17,0x01,0x40,0x00,0x17,0x01,0x40,0x00,0x17,0x01,0x40,0x00,0x83,0xff,0x3f,0x01,
0x03,0x06,0xc0,0x01,0x3d,0x00,0x80,0x02,0x01,0x40,0x00,0x04,0x01,0x80,0x80,0x04,
0x03,0xff,0x3f,0x05,0x41,0x40,0x82,0x04,0xa0,0x00,0x00,0x04,0x01,0x40,0x81,0x04,
0xa0,0x40,0x00,0x04,0x01,0x00,0x02,0x03,0x06,0x00,0x00,0x04,0x91,0x01,0x80,0x04,
0x07,0x00,0x00,0x05,0x01,0x00,0x81,0x05,0x21,0x81,0x00,0x04,0x11,0x02,0x00,0x04,
0x20,0x40,0x01,0x04,0x19,0x01,0x40,0x04,0x11,0x02,0x00,0x04,0x97,0x00,0x40,0x00,
0x11,0x03,0x00,0x04,0x01,0x00,0x82,0x03,0x01,0x40,0x01,0x04,0x20,0xc0,0x01,0x04,
0x83,0xff,0xbf,0x04,0xa0,0x00,0x00,0x04,0x01,0xc0,0x81,0x04,0x01,0x00,0x02,0x05,
0xa0,0x00,0x82,0x04,0x01,0xc0,0x01,0x04,0x01,0xc0,0x80,0x04,0x01,0x80,0x01,0x05,
0x20,0x41,0x02,0x04,0xbd,0x00,0x00,0x04,0x20,0xc0,0x01,0x04,0x83,0xff,0xbf,0x04,
0xa0,0x00,0x00,0x04,0x01,0xc0,0x81,0x04,0x01,0x00,0x02,0x05,0xa0,0x00,0x82,0x04,
0x29,0x00,0x00,0x04,0x00,0x00,0x00,0x02,0x00,0x00,0x01,0x3b,0x00,0x00,0x01,0x20,
0x00,0x00,0x00,0x0a,0x00,0x02,0x5b,0x5d,0x00,0x00,0x04,0x6a,0x6f,0x69,0x6e,0x00,
0x00,0x11,0x61,0x64,0x64,0x5f,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,
0x6c,0x65,0x72,0x00,0x00,0x19,0x45,0x56,0x45,0x4e,0x54,0x5f,0x55,0x53,0x45,0x52,
0x5f,0x4c,0x49,0x53,0x54,0x5f,0x53,0x45,0x4c,0x45,0x43,0x54,0x49,0x4f,0x4e,0x00,
0x00,0x06,0x45,0x64,0x69,0x74,0x6f,0x72,0x00,0x00,0x05,0x66,0x6f,0x63,0x75,0x73,
0x00,0x00,0x06,0x4f,0x75,0x74,0x70,0x75,0x74,0x00,0x00,0x05,0x62,0x79,0x74,0x65,
0x73,0x00,0x00,0x0f,0x61,0x75,0x74,0x6f,0x43,0x53,0x65,0x70,0x61,0x72,0x61,0x74,
0x6f,0x72,0x3d,0x00,0x00,0x0c,0x75,0x73,0x65,0x72,0x4c,0x69,0x73,0x74,0x53,0x68,
0x6f,0x77,0x00,0x00,0x00,0x00,0x46,0x00,0x02,0x00,0x05,0x00,0x00,0x00,0x00,0x00,
0x05,0x00,0x00,0x00,0x26,0x00,0x00,0x00,0x11,0x00,0x00,0x01,0x3d,0x00,0x80,0x01,
0xa0,0x40,0x00,0x01,0x29,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x08,0x46,
0x69,0x6c,0x65,0x50,0x61,0x74,0x68,0x00,0x00,0x00,0x02,0x00,0x05,0x50,0x72,0x6f,
0x70,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x00,0x01,0xd5,0x00,0x03,0x00,0x07,
0x00,0x02,0x00,0x00,0x00,0x3d,0x00,0x00,0x26,0x00,0x00,0x00,0x11,0x00,0x80,0x01,
0x04,0x01,0x00,0x02,0xa0,0x40,0x80,0x01,0x99,0x01,0xc0,0x01,0x91,0x01,0x80,0x01,
0x84,0x02,0x00,0x02,0xa0,0x00,0x81,0x01,0x19,0x19,0xc0,0x01,0x11,0x03,0x80,0x01,
0x3d,0x00,0x00,0x02,0xa0,0xc0,0x81,0x01,0x20,0x00,0x82,0x01,0x03,0x00,0x40,0x02,
0xb2,0x40,0x82,0x01,0x19,0x08,0xc0,0x01,0xbd,0x00,0x80,0x01,0x11,0x03,0x00,0x02,
0x3d,0x01,0x80,0x02,0xa0,0xc0,0x01,0x02,0x3e,0x00,0x81,0x01,0xbd,0x01,0x00,0x02,
0x3e,0x00,0x81,0x01,0x01,0xc0,0x00,0x01,0x11,0x01,0x80,0x01,0x01,0x80,0x00,0x02,
0xa0,0x80,0x82,0x01,0x19,0x02,0xc0,0x01,0x11,0x01,0x80,0x01,0x01,0x80,0x00,0x02,
0x40,0x01,0x80,0x02,0xa1,0xc0,0x82,0x01,0x11,0x03,0x80,0x01,0x3d,0x02,0x00,0x02,
0xa0,0xc0,0x81,0x01,0x01,0xc0,0x00,0x01,0x20,0x00,0x83,0x01,0xbd,0x00,0x00,0x02,
0xb2,0x40,0x82,0x01,0x19,0x04,0xc0,0x01,0xbd,0x00,0x80,0x01,0x11,0x03,0x00,0x02,
0xbd,0x02,0x80,0x02,0xa0,0xc0,0x01,0x02,0x3e,0x00,0x81,0x01,0xbd,0x01,0x00,0x02,
0x3e,0x00,0x81,0x01,0x01,0xc0,0x00,0x01,0x11,0x01,0x80,0x01,0x01,0x80,0x00,0x02,
0xa0,0x80,0x82,0x01,0x99,0x02,0xc0,0x01,0x11,0x01,0x80,0x01,0x01,0x80,0x00,0x02,
0x40,0x03,0x80,0x02,0xa1,0xc0,0x82,0x01,0x97,0x00,0x40,0x00,0x05,0x00,0x80,0x01,
0x97,0x00,0x40,0x00,0x05,0x00,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x06,
0x00,0x00,0x08,0x50,0x4c,0x41,0x54,0x5f,0x57,0x49,0x4e,0x00,0x00,0x00,0x00,0x00,
0x10,0x53,0x63,0x69,0x74,0x65,0x44,0x65,0x66,0x61,0x75,0x6c,0x74,0x48,0x6f,0x6d,
0x65,0x00,0x00,0x0c,0x2f,0x73,0x63,0x69,0x74,0x65,0x5f,0x6d,0x72,0x75,0x62,0x79,
0x00,0x00,0x13,0x65,0x78,0x74,0x2e,0x6d,0x72,0x75,0x62,0x79,0x2e,0x64,0x69,0x72,
0x65,0x63,0x74,0x6f,0x72,0x79,0x00,0x00,0x0d,0x53,0x63,0x69,0x74,0x65,0x55,0x73,
0x65,0x72,0x48,0x6f,0x6d,0x65,0x00,0x00,0x00,0x0d,0x00,0x06,0x4d,0x6f,0x64,0x75,
0x6c,0x65,0x00,0x00,0x0e,0x63,0x6f,0x6e,0x73,0x74,0x5f,0x64,0x65,0x66,0x69,0x6e,
0x65,0x64,0x3f,0x00,0x00,0x03,0x44,0x69,0x72,0x00,0x00,0x06,0x4b,0x65,0x72,0x6e,
0x65,0x6c,0x00,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,
0x00,0x00,0x04,0x6c,0x6f,0x61,0x64,0x00,0x00,0x05,0x50,0x72,0x6f,0x70,0x73,0x00,
0x00,0x02,0x5b,0x5d,0x00,0x00,0x04,0x74,0x6f,0x5f,0x69,0x00,0x00,0x02,0x3d,0x3d,
0x00,0x00,0x06,0x65,0x78,0x69,0x73,0x74,0x3f,0x00,0x00,0x07,0x66,0x6f,0x72,0x65,
0x61,0x63,0x68,0x00,0x00,0x04,0x74,0x6f,0x5f,0x73,0x00,0x00,0x00,0x00,0xde,0x00,
0x04,0x00,0x08,0x00,0x00,0x00,0x00,0x00,0x22,0x00,0x00,0x00,0x26,0x00,0x00,0x02,
0x1a,0x09,0x40,0x00,0x01,0x40,0x00,0x02,0x03,0xfe,0xbf,0x02,0x03,0xff,0x3f,0x03,
0x41,0x40,0x81,0x02,0xa0,0x00,0x00,0x02,0x3d,0x00,0x80,0x02,0xb2,0x40,0x00,0x02,
0x19,0x04,0x40,0x02,0x06,0x00,0x00,0x02,0x15,0x80,0x80,0x02,0xbd,0x00,0x00,0x03,
0xac,0xc0,0x80,0x02,0x01,0x40,0x00,0x03,0xac,0xc0,0x80,0x02,0xa0,0x80,0x00,0x02,
0x97,0x00,0x40,0x00,0x05,0x00,0x00,0x02,0x17,0x06,0x40,0x00,0x1b,0x00,0x00,0x02,
0x11,0x02,0x80,0x02,0x01,0x00,0x01,0x03,0xa0,0x40,0x81,0x02,0x98,0x00,0xc0,0x02,
0x97,0x02,0x40,0x00,0x01,0x00,0x81,0x01,0x06,0x00,0x00,0x02,0x01,0xc0,0x80,0x02,
0xa0,0x80,0x01,0x02,0x17,0x01,0x40,0x00,0x1d,0x00,0x00,0x02,0x1c,0x00,0x80,0x00,
0x29,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x03,0x2e,0x72,0x62,0x00,0x00,
0x01,0x2f,0x00,0x00,0x00,0x07,0x00,0x02,0x5b,0x5d,0x00,0x00,0x02,0x3d,0x3d,0x00,
0x00,0x04,0x6c,0x6f,0x61,0x64,0x00,0x00,0x01,0x2b,0x00,0x00,0x0d,0x53,0x74,0x61,
0x6e,0x64,0x61,0x72,0x64,0x45,0x72,0x72,0x6f,0x72,0x00,0x00,0x03,0x3d,0x3d,0x3d,
0x00,0x00,0x04,0x70,0x75,0x74,0x73,0x00,0x00,0x00,0x00,0xde,0x00,0x04,0x00,0x08,
0x00,0x00,0x00,0x00,0x00,0x22,0x00,0x00,0x26,0x00,0x00,0x02,0x1a,0x09,0x40,0x00,
0x01,0x40,0x00,0x02,0x03,0xfe,0xbf,0x02,0x03,0xff,0x3f,0x03,0x41,0x40,0x81,0x02,
0xa0,0x00,0x00,0x02,0x3d,0x00,0x80,0x02,0xb2,0x40,0x00,0x02,0x19,0x04,0x40,0x02,
0x06,0x00,0x00,0x02,0x15,0x80,0x80,0x02,0xbd,0x00,0x00,0x03,0xac,0xc0,0x80,0x02,
0x01,0x40,0x00,0x03,0xac,0xc0,0x80,0x02,0xa0,0x80,0x00,0x02,0x97,0x00,0x40,0x00,
0x05,0x00,0x00,0x02,0x17,0x06,0x40,0x00,0x1b,0x00,0x00,0x02,0x11,0x02,0x80,0x02,
0x01,0x00,0x01,0x03,0xa0,0x40,0x81,0x02,0x98,0x00,0xc0,0x02,0x97,0x02,0x40,0x00,
0x01,0x00,0x81,0x01,0x06,0x00,0x00,0x02,0x01,0xc0,0x80,0x02,0xa0,0x80,0x01,0x02,
0x17,0x01,0x40,0x00,0x1d,0x00,0x00,0x02,0x1c,0x00,0x80,0x00,0x29,0x00,0x00,0x02,
0x00,0x00,0x00,0x02,0x00,0x00,0x03,0x2e,0x72,0x62,0x00,0x00,0x01,0x2f,0x00,0x00,
0x00,0x07,0x00,0x02,0x5b,0x5d,0x00,0x00,0x02,0x3d,0x3d,0x00,0x00,0x04,0x6c,0x6f,
0x61,0x64,0x00,0x00,0x01,0x2b,0x00,0x00,0x0d,0x53,0x74,0x61,0x6e,0x64,0x61,0x72,
0x64,0x45,0x72,0x72,0x6f,0x72,0x00,0x00,0x03,0x3d,0x3d,0x3d,0x00,0x00,0x04,0x70,
0x75,0x74,0x73,0x00,0x00,0x00,0x00,0xb8,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,
0x00,0x16,0x00,0x00,0x26,0x00,0x00,0x02,0x01,0x40,0x80,0x01,0x03,0xff,0x3f,0x02,
0xa0,0x00,0x80,0x01,0x3d,0x00,0x00,0x02,0xa0,0x40,0x80,0x01,0x99,0x02,0xc0,0x01,
0x01,0x40,0x80,0x01,0x03,0xff,0x3f,0x02,0xa0,0x00,0x80,0x01,0xbd,0x00,0x00,0x02,
0xa0,0x40,0x80,0x01,0x99,0x03,0xc0,0x01,0x06,0x00,0x80,0x01,0x06,0x00,0x00,0x02,
0x20,0xc0,0x00,0x02,0x11,0x02,0x80,0x02,0xa0,0x00,0x00,0x02,0x01,0x40,0x80,0x02,
0x20,0x81,0x80,0x01,0x08,0x00,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x02,
0x00,0x00,0x01,0x5c,0x00,0x00,0x01,0x2f,0x00,0x00,0x00,0x05,0x00,0x02,0x5b,0x5d,
0x00,0x00,0x02,0x21,0x3d,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,
0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,
0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x50,
0x45,0x4e,0x5f,0x53,0x57,0x49,0x54,0x43,0x48,0x00,0x00,0x00,0x00,0xcd,0x00,0x06,
0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x1b,0x26,0x00,0x00,0x02,0x01,0x40,0x00,0x03,
0x20,0x00,0x00,0x03,0x01,0x80,0x81,0x01,0x01,0x40,0x00,0x03,0x01,0xc0,0x80,0x03,
0xa0,0x40,0x00,0x03,0xaf,0x80,0x00,0x03,0x01,0x80,0x01,0x02,0x01,0x40,0x00,0x03,
0x20,0xc0,0x00,0x03,0x83,0xff,0xbf,0x03,0xb2,0x00,0x01,0x03,0x19,0x01,0x40,0x03,
0x03,0x01,0x40,0x03,0x97,0x00,0x40,0x00,0x83,0x00,0x40,0x03,0x01,0x80,0x81,0x02,
0x01,0x40,0x00,0x03,0x01,0x00,0x81,0x03,0xa0,0x40,0x01,0x03,0x83,0xff,0xbf,0x03,
0x01,0x40,0x01,0x04,0x20,0xc0,0x01,0x04,0x41,0xc0,0x81,0x03,0xa0,0x80,0x01,0x03,
0x29,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x0a,0x63,0x75,
0x72,0x72,0x65,0x6e,0x74,0x50,0x6f,0x73,0x00,0x00,0x10,0x6c,0x69,0x6e,0x65,0x46,
0x72,0x6f,0x6d,0x50,0x6f,0x73,0x69,0x74,0x69,0x6f,0x6e,0x00,0x00,0x01,0x2d,0x00,
0x00,0x07,0x45,0x4f,0x4c,0x4d,0x6f,0x64,0x65,0x00,0x00,0x02,0x3d,0x3d,0x00,0x00,
0x07,0x67,0x65,0x74,0x4c,0x69,0x6e,0x65,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x02,
0x2d,0x40,0x00,0x00,0x00,0x01,0xb1,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x00,0x00,
0x41,0x00,0x00,0x00,0x26,0x00,0x00,0x02,0x01,0x40,0x00,0x02,0x3d,0x00,0x80,0x02,
0xa0,0x00,0x00,0x02,0x19,0x01,0x40,0x02,0x08,0x00,0x00,0x02,0x29,0x00,0x00,0x02,
0x91,0x00,0x00,0x02,0x20,0x80,0x00,0x02,0x01,0x00,0x81,0x01,0x99,0x0d,0x40,0x02,
0x06,0x00,0x00,0x02,0x20,0xc0,0x00,0x02,0x91,0x02,0x80,0x02,0xa0,0x00,0x01,0x02,
0x20,0x80,0x01,0x02,0x98,0x03,0x40,0x02,0x06,0x00,0x00,0x02,0x20,0xc0,0x00,0x02,
0x91,0x02,0x80,0x02,0xa0,0x00,0x01,0x02,0x20,0xc0,0x01,0x02,0x83,0xff,0xbf,0x02,
0xb2,0x00,0x02,0x02,0x19,0x01,0x40,0x02,0x08,0x00,0x00,0x02,0x29,0x00,0x00,0x02,
0x06,0x00,0x00,0x02,0x06,0x00,0x80,0x02,0x20,0xc0,0x80,0x02,0x91,0x02,0x00,0x03,
0xa0,0x00,0x81,0x02,0x06,0x00,0x00,0x03,0x91,0x00,0x80,0x03,0xa0,0x80,0x02,0x03,
0x20,0x41,0x02,0x02,0x08,0x00,0x00,0x02,0x17,0x0d,0x40,0x00,0x06,0x00,0x00,0x02,
0x20,0xc0,0x00,0x02,0x91,0x05,0x80,0x02,0xa0,0x00,0x01,0x02,0x20,0x80,0x01,0x02,
0x98,0x03,0x40,0x02,0x06,0x00,0x00,0x02,0x20,0xc0,0x00,0x02,0x91,0x05,0x80,0x02,
0xa0,0x00,0x01,0x02,0x20,0xc0,0x01,0x02,0x83,0xff,0xbf,0x02,0xb2,0x00,0x02,0x02,
0x19,0x01,0x40,0x02,0x08,0x00,0x00,0x02,0x29,0x00,0x00,0x02,0x06,0x00,0x00,0x02,
0x06,0x00,0x80,0x02,0x20,0xc0,0x80,0x02,0x91,0x05,0x00,0x03,0xa0,0x00,0x81,0x02,
0x06,0x00,0x00,0x03,0x11,0x06,0x80,0x03,0xa0,0x80,0x02,0x03,0x20,0x41,0x02,0x02,
0x07,0x00,0x00,0x02,0x29,0x00,0x00,0x02,0x00,0x00,0x00,0x01,0x00,0x00,0x01,0x0a,
0x00,0x00,0x00,0x0d,0x00,0x02,0x21,0x3d,0x00,0x00,0x06,0x45,0x64,0x69,0x74,0x6f,
0x72,0x00,0x00,0x05,0x66,0x6f,0x63,0x75,0x73,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,
0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,
0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x45,0x44,0x49,0x54,0x4f,0x52,0x5f,0x4c,
0x49,0x4e,0x45,0x00,0x00,0x01,0x21,0x00,0x00,0x06,0x6c,0x65,0x6e,0x67,0x74,0x68,
0x00,0x00,0x02,0x3d,0x3d,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,
0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x67,0x72,0x61,0x62,0x5f,0x6c,0x69,0x6e,0x65,
0x5f,0x66,0x72,0x6f,0x6d,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x55,
0x54,0x50,0x55,0x54,0x5f,0x4c,0x49,0x4e,0x45,0x00,0x00,0x06,0x4f,0x75,0x74,0x70,
0x75,0x74,0x00,0x00,0x00,0x01,0x69,0x00,0x01,0x00,0x05,0x00,0x00,0x00,0x00,0x00,
0x1d,0x00,0x00,0x00,0x06,0x00,0x80,0x00,0x84,0x00,0x00,0x01,0x04,0x01,0x80,0x01,
0x20,0x01,0x80,0x00,0x06,0x00,0x80,0x00,0x84,0x01,0x00,0x01,0x04,0x02,0x80,0x01,
0x20,0x01,0x80,0x00,0x06,0x00,0x80,0x00,0x84,0x02,0x00,0x01,0x04,0x03,0x80,0x01,
0x20,0x01,0x80,0x00,0x06,0x00,0x80,0x00,0x84,0x03,0x00,0x01,0x04,0x04,0x80,0x01,
0x20,0x01,0x80,0x00,0x06,0x00,0x80,0x00,0x84,0x04,0x00,0x01,0x04,0x05,0x80,0x01,
0x20,0x01,0x80,0x00,0x06,0x00,0x80,0x00,0x84,0x05,0x00,0x01,0x04,0x06,0x80,0x01,
0x20,0x01,0x80,0x00,0x06,0x00,0x80,0x00,0x84,0x06,0x00,0x01,0x04,0x07,0x80,0x01,
0x20,0x01,0x80,0x00,0x29,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0f,
0x00,0x0c,0x61,0x6c,0x69,0x61,0x73,0x5f,0x6d,0x65,0x74,0x68,0x6f,0x64,0x00,0x00,
0x0c,0x73,0x74,0x61,0x72,0x74,0x53,0x74,0x79,0x6c,0x69,0x6e,0x67,0x00,0x00,0x0d,
0x73,0x74,0x61,0x72,0x74,0x5f,0x73,0x74,0x79,0x6c,0x69,0x6e,0x67,0x00,0x00,0x0a,
0x65,0x6e,0x64,0x53,0x74,0x79,0x6c,0x69,0x6e,0x67,0x00,0x00,0x0b,0x65,0x6e,0x64,
0x5f,0x73,0x74,0x79,0x6c,0x69,0x6e,0x67,0x00,0x00,0x0b,0x61,0x74,0x4c,0x69,0x6e,
0x65,0x53,0x74,0x61,0x72,0x74,0x00,0x00,0x0d,0x61,0x74,0x5f,0x6c,0x69,0x6e,0x65,
0x5f,0x73,0x74,0x61,0x72,0x74,0x00,0x00,0x09,0x61,0x74,0x4c,0x69,0x6e,0x65,0x45,
0x6e,0x64,0x00,0x00,0x0b,0x61,0x74,0x5f,0x6c,0x69,0x6e,0x65,0x5f,0x65,0x6e,0x64,
0x00,0x00,0x08,0x73,0x65,0x74,0x53,0x74,0x61,0x74,0x65,0x00,0x00,0x09,0x73,0x65,
0x74,0x5f,0x73,0x74,0x61,0x74,0x65,0x00,0x00,0x0f,0x66,0x6f,0x72,0x77,0x61,0x72,
0x64,0x53,0x65,0x74,0x53,0x74,0x61,0x74,0x65,0x00,0x00,0x11,0x66,0x6f,0x72,0x77,
0x61,0x72,0x64,0x5f,0x73,0x65,0x74,0x5f,0x73,0x74,0x61,0x74,0x65,0x00,0x00,0x0b,
0x63,0x68,0x61,0x6e,0x67,0x65,0x53,0x74,0x61,0x74,0x65,0x00,0x00,0x0c,0x63,0x68,
0x61,0x6e,0x67,0x65,0x5f,0x73,0x74,0x61,0x74,0x65,0x00,0x00,0x00,0x00,0x41,0x00,
0x03,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x26,0x00,0x00,0x02,
0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x00,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x10,0x6f,0x6e,0x5f,0x62,0x75,0x66,
0x66,0x65,0x72,0x5f,0x73,0x77,0x69,0x74,0x63,0x68,0x00,0x00,0x00,0x00,0x41,0x00,
0x03,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x26,0x00,0x00,0x02,
0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x00,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x10,0x6f,0x6e,0x5f,0x62,0x75,0x66,
0x66,0x65,0x72,0x5f,0x73,0x77,0x69,0x74,0x63,0x68,0x00,0x00,0x00,0x00,0x3d,0x00,
0x03,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x26,0x00,0x00,0x02,
0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x00,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x0c,0x6f,0x6e,0x5f,0x6c,0x69,0x6e,
0x65,0x5f,0x63,0x68,0x61,0x72,0x00,0x00,0x00,0x00,0xba,0x00,0x02,0x00,0x06,0x00,
0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x26,0x00,0x00,0x00,0x06,0x00,0x00,0x01,
0x84,0x00,0x80,0x01,0xa0,0x00,0x00,0x01,0x99,0x01,0x40,0x01,0x06,0x00,0x00,0x01,
0x20,0x40,0x00,0x01,0x29,0x00,0x00,0x01,0x11,0x01,0x00,0x01,0x11,0x01,0x80,0x01,
0x20,0x00,0x81,0x01,0x11,0x01,0x00,0x02,0x13,0x03,0x00,0x02,0xa0,0x40,0x81,0x01,
0xa0,0xc0,0x00,0x01,0x29,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,
0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x0d,
0x6f,0x6e,0x4d,0x61,0x72,0x67,0x69,0x6e,0x43,0x6c,0x69,0x63,0x6b,0x00,0x00,0x05,
0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,
0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,
0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x12,0x45,0x56,0x45,
0x4e,0x54,0x5f,0x4d,0x41,0x52,0x47,0x49,0x4e,0x5f,0x43,0x4c,0x49,0x43,0x4b,0x00,
0x00,0x00,0x00,0xba,0x00,0x02,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,
0x26,0x00,0x00,0x00,0x06,0x00,0x00,0x01,0x84,0x00,0x80,0x01,0xa0,0x00,0x00,0x01,
0x99,0x01,0x40,0x01,0x06,0x00,0x00,0x01,0x20,0x40,0x00,0x01,0x29,0x00,0x00,0x01,
0x11,0x01,0x00,0x01,0x11,0x01,0x80,0x01,0x20,0x00,0x81,0x01,0x11,0x01,0x00,0x02,
0x13,0x03,0x00,0x02,0xa0,0x40,0x81,0x01,0xa0,0xc0,0x00,0x01,0x29,0x00,0x00,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,
0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x0d,0x6f,0x6e,0x44,0x6f,0x75,0x62,0x6c,0x65,
0x43,0x6c,0x69,0x63,0x6b,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,
0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,
0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,
0x5b,0x5d,0x00,0x00,0x12,0x45,0x56,0x45,0x4e,0x54,0x5f,0x44,0x4f,0x55,0x42,0x4c,
0x45,0x5f,0x43,0x4c,0x49,0x43,0x4b,0x00,0x00,0x00,0x00,0xbf,0x00,0x02,0x00,0x06,
0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x26,0x00,0x00,0x00,0x06,0x00,0x00,0x01,
0x84,0x00,0x80,0x01,0xa0,0x00,0x00,0x01,0x99,0x01,0x40,0x01,0x06,0x00,0x00,0x01,
0x20,0x40,0x00,0x01,0x29,0x00,0x00,0x01,0x11,0x01,0x00,0x01,0x11,0x01,0x80,0x01,
0x20,0x00,0x81,0x01,0x11,0x01,0x00,0x02,0x13,0x03,0x00,0x02,0xa0,0x40,0x81,0x01,
0xa0,0xc0,0x00,0x01,0x29,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,
0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x0f,
0x6f,0x6e,0x53,0x61,0x76,0x65,0x50,0x6f,0x69,0x6e,0x74,0x4c,0x65,0x66,0x74,0x00,
0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,
0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,
0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x15,0x45,
0x56,0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,0x5f,0x50,0x4f,0x49,0x4e,0x54,0x5f,
0x4c,0x45,0x46,0x54,0x00,0x00,0x00,0x00,0xc5,0x00,0x02,0x00,0x06,0x00,0x00,0x00,
0x00,0x00,0x10,0x00,0x26,0x00,0x00,0x00,0x06,0x00,0x00,0x01,0x84,0x00,0x80,0x01,
0xa0,0x00,0x00,0x01,0x99,0x01,0x40,0x01,0x06,0x00,0x00,0x01,0x20,0x40,0x00,0x01,
0x29,0x00,0x00,0x01,0x11,0x01,0x00,0x01,0x11,0x01,0x80,0x01,0x20,0x00,0x81,0x01,
0x11,0x01,0x00,0x02,0x13,0x03,0x00,0x02,0xa0,0x40,0x81,0x01,0xa0,0xc0,0x00,0x01,
0x29,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,
0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x12,0x6f,0x6e,0x53,0x61,
0x76,0x65,0x50,0x6f,0x69,0x6e,0x74,0x52,0x65,0x61,0x63,0x68,0x65,0x64,0x00,0x00,
0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,
0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,
0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x18,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,0x5f,0x50,0x4f,0x49,0x4e,0x54,0x5f,0x52,
0x45,0x41,0x43,0x48,0x45,0x44,0x00,0x00,0x00,0x00,0xb3,0x00,0x03,0x00,0x07,0x00,
0x00,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,
0x84,0x00,0x00,0x02,0xa0,0x00,0x80,0x01,0x19,0x02,0xc0,0x01,0x06,0x00,0x80,0x01,
0x01,0x40,0x00,0x02,0xa0,0x40,0x80,0x01,0x29,0x00,0x80,0x01,0x11,0x01,0x80,0x01,
0x11,0x01,0x00,0x02,0x20,0x00,0x01,0x02,0x11,0x01,0x80,0x02,0x13,0x03,0x80,0x02,
0xa0,0x40,0x01,0x02,0x01,0x40,0x80,0x02,0x20,0xc1,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,
0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x06,0x6f,0x6e,0x43,0x68,0x61,0x72,0x00,0x00,
0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,
0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,
0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x0a,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x43,0x48,0x41,0x52,0x00,0x00,0x00,0x00,0xb3,0x00,0x03,0x00,
0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,
0x84,0x00,0x00,0x02,0xa0,0x00,0x80,0x01,0x19,0x02,0xc0,0x01,0x06,0x00,0x80,0x01,
0x01,0x40,0x00,0x02,0xa0,0x40,0x80,0x01,0x29,0x00,0x80,0x01,0x11,0x01,0x80,0x01,
0x11,0x01,0x00,0x02,0x20,0x00,0x01,0x02,0x11,0x01,0x80,0x02,0x13,0x03,0x80,0x02,
0xa0,0x40,0x01,0x02,0x01,0x40,0x80,0x02,0x20,0xc1,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,
0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x06,0x6f,0x6e,0x53,0x61,0x76,0x65,0x00,0x00,
0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,
0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,
0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x0a,0x45,0x56,
0x45,0x4e,0x54,0x5f,0x53,0x41,0x56,0x45,0x00,0x00,0x00,0x00,0xc0,0x00,0x03,0x00,
0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,
0x84,0x00,0x00,0x02,0xa0,0x00,0x80,0x01,0x19,0x02,0xc0,0x01,0x06,0x00,0x80,0x01,
0x01,0x40,0x00,0x02,0xa0,0x40,0x80,0x01,0x29,0x00,0x80,0x01,0x11,0x01,0x80,0x01,
0x11,0x01,0x00,0x02,0x20,0x00,0x01,0x02,0x11,0x01,0x80,0x02,0x13,0x03,0x80,0x02,
0xa0,0x40,0x01,0x02,0x01,0x40,0x80,0x02,0x20,0xc1,0x80,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,
0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x0c,0x6f,0x6e,0x42,0x65,0x66,0x6f,0x72,0x65,
0x53,0x61,0x76,0x65,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,
0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,
0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,
0x5d,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x42,0x45,0x46,0x4f,0x52,0x45,
0x5f,0x53,0x41,0x56,0x45,0x00,0x00,0x00,0x00,0xc0,0x00,0x03,0x00,0x07,0x00,0x00,
0x00,0x00,0x00,0x12,0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,0x84,0x00,0x00,0x02,
0xa0,0x00,0x80,0x01,0x19,0x02,0xc0,0x01,0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,
0xa0,0x40,0x80,0x01,0x29,0x00,0x80,0x01,0x11,0x01,0x80,0x01,0x11,0x01,0x00,0x02,
0x20,0x00,0x01,0x02,0x11,0x01,0x80,0x02,0x13,0x03,0x80,0x02,0xa0,0x40,0x01,0x02,
0x01,0x40,0x80,0x02,0x20,0xc1,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,
0x3f,0x00,0x00,0x0c,0x6f,0x6e,0x53,0x77,0x69,0x74,0x63,0x68,0x46,0x69,0x6c,0x65,
0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,
0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,
0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x11,
0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x57,0x49,0x54,0x43,0x48,0x5f,0x46,0x49,0x4c,
0x45,0x00,0x00,0x00,0x00,0xb3,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x12,
0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,0x84,0x00,0x00,0x02,0xa0,0x00,0x80,0x01,
0x19,0x02,0xc0,0x01,0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0x40,0x80,0x01,
0x29,0x00,0x80,0x01,0x11,0x01,0x80,0x01,0x11,0x01,0x00,0x02,0x20,0x00,0x01,0x02,
0x11,0x01,0x80,0x02,0x13,0x03,0x80,0x02,0xa0,0x40,0x01,0x02,0x01,0x40,0x80,0x02,
0x20,0xc1,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,
0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x06,
0x6f,0x6e,0x4f,0x70,0x65,0x6e,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,
0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,
0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,
0x02,0x5b,0x5d,0x00,0x00,0x0a,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4f,0x50,0x45,0x4e,
0x00,0x00,0x00,0x00,0xd9,0x00,0x02,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x15,0x00,
0x26,0x00,0x00,0x00,0x06,0x00,0x00,0x01,0x84,0x00,0x80,0x01,0xa0,0x00,0x00,0x01,
0x99,0x01,0x40,0x01,0x06,0x00,0x00,0x01,0x20,0x40,0x00,0x01,0x29,0x00,0x00,0x01,
0x11,0x01,0x00,0x01,0x20,0xc0,0x00,0x01,0x19,0x04,0x40,0x01,0x11,0x02,0x00,0x01,
0x11,0x02,0x80,0x01,0x20,0x80,0x81,0x01,0x11,0x02,0x00,0x02,0x13,0x04,0x00,0x02,
0xa0,0xc0,0x81,0x01,0xa0,0x40,0x01,0x01,0x97,0x00,0x40,0x00,0x05,0x00,0x00,0x01,
0x29,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x09,0x00,0x0b,0x72,0x65,
0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x0a,0x6f,0x6e,0x55,0x70,
0x64,0x61,0x74,0x65,0x55,0x49,0x00,0x00,0x06,0x45,0x64,0x69,0x74,0x6f,0x72,0x00,
0x00,0x05,0x66,0x6f,0x63,0x75,0x73,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,
0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,
0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,
0x00,0x02,0x5b,0x5d,0x00,0x00,0x0f,0x45,0x56,0x45,0x4e,0x54,0x5f,0x55,0x50,0x44,
0x41,0x54,0x45,0x5f,0x55,0x49,0x00,0x00,0x00,0x00,0xc9,0x00,0x06,0x00,0x0d,0x00,
0x00,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x26,0x00,0x00,0x08,0x06,0x00,0x00,0x03,
0x84,0x00,0x80,0x03,0xa0,0x00,0x00,0x03,0x99,0x03,0x40,0x03,0x06,0x00,0x00,0x03,
0x01,0x40,0x80,0x03,0x01,0x80,0x00,0x04,0x01,0xc0,0x80,0x04,0x01,0x00,0x01,0x05,
0x20,0x42,0x00,0x03,0x29,0x00,0x00,0x03,0x11,0x01,0x00,0x03,0x11,0x01,0x80,0x03,
0x20,0x00,0x81,0x03,0x11,0x01,0x00,0x04,0x13,0x03,0x00,0x04,0xa0,0x40,0x81,0x03,
0x01,0x40,0x00,0x04,0x01,0x80,0x80,0x04,0x01,0xc0,0x00,0x05,0x01,0x00,0x81,0x05,
0xa0,0xc2,0x00,0x03,0x29,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,
0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x05,
0x6f,0x6e,0x4b,0x65,0x79,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,
0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,
0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,
0x5b,0x5d,0x00,0x00,0x09,0x45,0x56,0x45,0x4e,0x54,0x5f,0x4b,0x45,0x59,0x00,0x00,
0x00,0x00,0xc8,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,
0x26,0x00,0x00,0x04,0x06,0x00,0x00,0x02,0x84,0x00,0x80,0x02,0xa0,0x00,0x00,0x02,
0x99,0x02,0x40,0x02,0x06,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,
0x20,0x41,0x00,0x02,0x29,0x00,0x00,0x02,0x11,0x01,0x00,0x02,0x11,0x01,0x80,0x02,
0x20,0x00,0x81,0x02,0x11,0x01,0x00,0x03,0x13,0x03,0x00,0x03,0xa0,0x40,0x81,0x02,
0x01,0x40,0x00,0x03,0x01,0x80,0x80,0x03,0xa0,0xc1,0x00,0x02,0x29,0x00,0x00,0x02,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,
0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x0c,0x6f,0x6e,0x44,0x77,0x65,0x6c,0x6c,0x53,
0x74,0x61,0x72,0x74,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,
0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,
0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,
0x5d,0x00,0x00,0x11,0x45,0x56,0x45,0x4e,0x54,0x5f,0x44,0x57,0x45,0x4c,0x4c,0x5f,
0x53,0x54,0x41,0x52,0x54,0x00,0x00,0x00,0x00,0xb5,0x00,0x03,0x00,0x07,0x00,0x00,
0x00,0x00,0x00,0x12,0x26,0x00,0x00,0x02,0x06,0x00,0x80,0x01,0x84,0x00,0x00,0x02,
0xa0,0x00,0x80,0x01,0x19,0x02,0xc0,0x01,0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,
0xa0,0x40,0x80,0x01,0x29,0x00,0x80,0x01,0x11,0x01,0x80,0x01,0x11,0x01,0x00,0x02,
0x20,0x00,0x01,0x02,0x11,0x01,0x80,0x02,0x13,0x03,0x80,0x02,0xa0,0x40,0x01,0x02,
0x01,0x40,0x80,0x02,0x20,0xc1,0x80,0x01,0x29,0x00,0x80,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x07,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,
0x3f,0x00,0x00,0x07,0x6f,0x6e,0x43,0x6c,0x6f,0x73,0x65,0x00,0x00,0x05,0x53,0x63,
0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,
0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,
0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,0x00,0x0b,0x45,0x56,0x45,0x4e,0x54,
0x5f,0x43,0x4c,0x4f,0x53,0x45,0x00,0x00,0x00,0x00,0xd7,0x00,0x04,0x00,0x09,0x00,
0x00,0x00,0x00,0x00,0x14,0x00,0x00,0x00,0x26,0x00,0x00,0x04,0x06,0x00,0x00,0x02,
0x84,0x00,0x80,0x02,0xa0,0x00,0x00,0x02,0x99,0x02,0x40,0x02,0x06,0x00,0x00,0x02,
0x01,0x40,0x80,0x02,0x01,0x80,0x00,0x03,0x20,0x41,0x00,0x02,0x29,0x00,0x00,0x02,
0x11,0x01,0x00,0x02,0x11,0x01,0x80,0x02,0x20,0x00,0x81,0x02,0x11,0x01,0x00,0x03,
0x13,0x03,0x00,0x03,0xa0,0x40,0x81,0x02,0x01,0x80,0x00,0x03,0x01,0x40,0x80,0x03,
0xa0,0xc1,0x00,0x02,0x29,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,
0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x13,
0x6f,0x6e,0x55,0x73,0x65,0x72,0x4c,0x69,0x73,0x74,0x53,0x65,0x6c,0x65,0x63,0x74,
0x69,0x6f,0x6e,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,
0x73,0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,
0x6e,0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,
0x00,0x00,0x19,0x45,0x56,0x45,0x4e,0x54,0x5f,0x55,0x53,0x45,0x52,0x5f,0x4c,0x49,
0x53,0x54,0x5f,0x53,0x45,0x4c,0x45,0x43,0x54,0x49,0x4f,0x4e,0x00,0x00,0x00,0x00,
0xc4,0x00,0x04,0x00,0x09,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x26,0x00,0x00,0x04,
0x06,0x00,0x00,0x02,0x84,0x00,0x80,0x02,0xa0,0x00,0x00,0x02,0x99,0x02,0x40,0x02,
0x06,0x00,0x00,0x02,0x06,0x00,0x80,0x02,0x20,0x80,0x80,0x02,0xa0,0x40,0x00,0x02,
0x29,0x00,0x00,0x02,0x91,0x01,0x00,0x02,0x91,0x01,0x80,0x02,0x20,0x40,0x81,0x02,
0x91,0x01,0x00,0x03,0x93,0x03,0x00,0x03,0xa0,0x80,0x81,0x02,0x01,0x40,0x00,0x03,
0x01,0x80,0x80,0x03,0xa0,0x01,0x01,0x02,0x29,0x00,0x00,0x02,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x08,0x00,0x0b,0x72,0x65,0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,
0x3f,0x00,0x00,0x07,0x6f,0x6e,0x53,0x74,0x72,0x69,0x70,0x00,0x00,0x04,0x66,0x69,
0x6c,0x65,0x00,0x00,0x05,0x53,0x63,0x69,0x54,0x45,0x00,0x00,0x0c,0x64,0x69,0x73,
0x70,0x61,0x74,0x63,0x68,0x5f,0x6f,0x6e,0x65,0x00,0x00,0x0e,0x65,0x76,0x65,0x6e,
0x74,0x5f,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x00,0x02,0x5b,0x5d,0x00,
0x00,0x0b,0x45,0x56,0x45,0x4e,0x54,0x5f,0x53,0x54,0x52,0x49,0x50,0x00,0x00,0x00,
0x01,0xa9,0x00,0x03,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x31,0x11,0x00,0x80,0x01,
0x20,0x40,0x80,0x01,0x3d,0x00,0x00,0x02,0xa0,0x80,0x80,0x01,0x19,0x02,0xc0,0x01,
0x91,0x01,0x80,0x01,0x91,0x02,0x00,0x02,0xad,0x8b,0x01,0x02,0xa0,0x00,0x81,0x01,
0x11,0x00,0x80,0x01,0x20,0xc0,0x81,0x01,0x20,0x00,0x82,0x01,0x99,0x01,0xc0,0x01,
0x91,0x04,0x80,0x01,0x84,0x05,0x00,0x02,0xa0,0x80,0x82,0x01,0x99,0x0d,0xc0,0x01,
0x91,0x01,0x80,0x01,0x20,0x00,0x83,0x01,0x01,0xc0,0x80,0x00,0x06,0x00,0x80,0x01,
0xbd,0x00,0x00,0x02,0x01,0x40,0x80,0x02,0xac,0x80,0x01,0x02,0xa0,0x40,0x83,0x01,
0x1a,0x02,0x40,0x00,0x06,0x00,0x80,0x01,0x01,0x40,0x00,0x02,0xa0,0xc0,0x82,0x01,
0x17,0x06,0x40,0x00,0x1b,0x00,0x80,0x01,0x11,0x07,0x00,0x02,0x01,0xc0,0x80,0x02,
0xa0,0xc0,0x03,0x02,0x98,0x00,0x40,0x02,0x97,0x02,0x40,0x00,0x01,0xc0,0x00,0x01,
0x06,0x00,0x80,0x01,0x01,0x80,0x00,0x02,0xa0,0x00,0x84,0x01,0x17,0x01,0x40,0x00,
0x1d,0x00,0x80,0x01,0x1c,0x00,0x80,0x00,0x17,0x02,0x40,0x00,0x06,0x00,0x80,0x01,
0x11,0x00,0x00,0x02,0x20,0x80,0x04,0x02,0xa0,0x40,0x84,0x01,0x29,0x00,0x80,0x01,
0x00,0x00,0x00,0x02,0x00,0x00,0x04,0x72,0x75,0x62,0x79,0x00,0x00,0x0b,0x4c,0x6f,
0x61,0x64,0x69,0x6e,0x67,0x2e,0x2e,0x2e,0x20,0x00,0x00,0x00,0x13,0x00,0x06,0x45,
0x64,0x69,0x74,0x6f,0x72,0x00,0x00,0x0e,0x6c,0x65,0x78,0x65,0x72,0x5f,0x6c,0x61,
0x6e,0x67,0x75,0x61,0x67,0x65,0x00,0x00,0x02,0x21,0x3d,0x00,0x00,0x05,0x53,0x63,
0x69,0x54,0x45,0x00,0x00,0x0c,0x6d,0x65,0x6e,0x75,0x5f,0x63,0x6f,0x6d,0x6d,0x61,
0x6e,0x64,0x00,0x00,0x0c,0x49,0x44,0x4d,0x5f,0x4c,0x41,0x4e,0x47,0x55,0x41,0x47,
0x45,0x00,0x00,0x01,0x2b,0x00,0x00,0x06,0x6d,0x6f,0x64,0x69,0x66,0x79,0x00,0x00,
0x01,0x21,0x00,0x00,0x06,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x00,0x00,0x0b,0x72,0x65,
0x73,0x70,0x6f,0x6e,0x64,0x5f,0x74,0x6f,0x3f,0x00,0x00,0x04,0x6c,0x6f,0x61,0x64,
0x00,0x00,0x0c,0x63,0x75,0x72,0x72,0x65,0x6e,0x74,0x5f,0x66,0x69,0x6c,0x65,0x00,
0x00,0x04,0x70,0x75,0x74,0x73,0x00,0x00,0x0d,0x53,0x74,0x61,0x6e,0x64,0x61,0x72,
0x64,0x45,0x72,0x72,0x6f,0x72,0x00,0x00,0x03,0x3d,0x3d,0x3d,0x00,0x00,0x01,0x70,
0x00,0x00,0x04,0x65,0x76,0x61,0x6c,0x00,0x00,0x08,0x67,0x65,0x74,0x5f,0x74,0x65,
0x78,0x74,0x00,0x44,0x42,0x47,0x00,0x00,0x00,0x0f,0x30,0x00,0x01,0x00,0x09,0x65,
0x78,0x74,0x6d,0x61,0x6e,0x2e,0x72,0x62,0x00,0x00,0x00,0x83,0x00,0x01,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x39,0x00,0x00,0x03,0x00,0x03,0x00,0x03,0x01,
0x16,0x01,0x16,0x01,0x16,0x01,0x1a,0x01,0x1a,0x01,0x1a,0x01,0x1e,0x01,0x1e,0x01,
0x1e,0x01,0x22,0x01,0x22,0x01,0x22,0x01,0x26,0x01,0x26,0x01,0x26,0x01,0x2a,0x01,
0x2a,0x01,0x2a,0x01,0x2e,0x01,0x2e,0x01,0x2e,0x01,0x32,0x01,0x32,0x01,0x32,0x01,
0x36,0x01,0x36,0x01,0x36,0x01,0x3a,0x01,0x3a,0x01,0x3a,0x01,0x40,0x01,0x40,0x01,
0x40,0x01,0x44,0x01,0x44,0x01,0x44,0x01,0x48,0x01,0x48,0x01,0x48,0x01,0x4c,0x01,
0x4c,0x01,0x4c,0x01,0x50,0x01,0x50,0x01,0x50,0x01,0x55,0x01,0x55,0x01,0x57,0x01,
0x57,0x01,0x57,0x01,0x57,0x01,0x66,0x01,0x66,0x01,0x66,0x00,0x00,0x00,0x8b,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3d,0x00,0x00,0x04,0x00,0x04,
0x00,0x05,0x00,0x05,0x00,0x06,0x00,0x06,0x00,0x07,0x00,0x07,0x00,0x08,0x00,0x08,
0x00,0x09,0x00,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x0c,0x00,0x0c,
0x00,0x0d,0x00,0x0d,0x00,0x0e,0x00,0x0e,0x00,0x0f,0x00,0x0f,0x00,0x10,0x00,0x10,
0x00,0x11,0x00,0x11,0x00,0x12,0x00,0x12,0x00,0x13,0x00,0x13,0x00,0x14,0x00,0x14,
0x00,0x15,0x00,0x15,0x00,0x17,0x00,0x17,0x00,0x18,0x00,0x18,0x00,0x19,0x00,0x19,
0x00,0x1a,0x00,0x1a,0x00,0x1c,0x00,0x1c,0x00,0x1c,0x01,0x06,0x01,0x06,0x01,0x06,
0x01,0x06,0x01,0x10,0x01,0x10,0x01,0x10,0x01,0x11,0x01,0x11,0x01,0x11,0x01,0x12,
0x01,0x12,0x01,0x12,0x01,0x12,0x00,0x00,0x01,0xa7,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xcb,0x00,0x00,0x1e,0x00,0x1e,0x00,0x1e,0x00,0x2a,0x00,
0x2a,0x00,0x2a,0x00,0x2e,0x00,0x2e,0x00,0x2e,0x00,0x44,0x00,0x44,0x00,0x44,0x00,
0x4f,0x00,0x4f,0x00,0x4f,0x00,0x5d,0x00,0x5d,0x00,0x5d,0x00,0x63,0x00,0x63,0x00,
0x63,0x00,0x66,0x00,0x66,0x00,0x66,0x00,0x69,0x00,0x69,0x00,0x69,0x00,0x6c,0x00,
0x6c,0x00,0x6c,0x00,0x6f,0x00,0x6f,0x00,0x6f,0x00,0x72,0x00,0x72,0x00,0x72,0x00,
0x75,0x00,0x75,0x00,0x75,0x00,0x78,0x00,0x78,0x00,0x78,0x00,0x7b,0x00,0x7b,0x00,
0x7b,0x00,0x7e,0x00,0x7e,0x00,0x7e,0x00,0x81,0x00,0x81,0x00,0x81,0x00,0x84,0x00,
0x84,0x00,0x84,0x00,0x87,0x00,0x87,0x00,0x87,0x00,0x8a,0x00,0x8a,0x00,0x8a,0x00,
0x8d,0x00,0x8d,0x00,0x8d,0x00,0x90,0x00,0x90,0x00,0x90,0x00,0x93,0x00,0x93,0x00,
0x93,0x00,0x9a,0x00,0x9a,0x00,0x9a,0x00,0xa4,0x00,0xa4,0x00,0xa4,0x00,0xa8,0x00,
0xa8,0x00,0xa8,0x00,0xc6,0x00,0xc6,0x00,0xc6,0x00,0xcd,0x00,0xcd,0x00,0xcd,0x00,
0xd5,0x00,0xd5,0x00,0xd5,0x00,0xe3,0x00,0xe3,0x00,0xe3,0x00,0xe5,0x00,0xe5,0x00,
0xe5,0x00,0xe5,0x00,0xe6,0x00,0xe6,0x00,0xe6,0x00,0xe6,0x00,0xe7,0x00,0xe7,0x00,
0xe7,0x00,0xe7,0x00,0xe8,0x00,0xe8,0x00,0xe8,0x00,0xe8,0x00,0xe9,0x00,0xe9,0x00,
0xe9,0x00,0xe9,0x00,0xea,0x00,0xea,0x00,0xea,0x00,0xea,0x00,0xeb,0x00,0xeb,0x00,
0xeb,0x00,0xeb,0x00,0xec,0x00,0xec,0x00,0xec,0x00,0xec,0x00,0xed,0x00,0xed,0x00,
0xed,0x00,0xed,0x00,0xef,0x00,0xef,0x00,0xef,0x00,0xef,0x00,0xf0,0x00,0xf0,0x00,
0xf0,0x00,0xf0,0x00,0xf1,0x00,0xf1,0x00,0xf1,0x00,0xf1,0x00,0xf3,0x00,0xf3,0x00,
0xf3,0x00,0xf3,0x00,0xf4,0x00,0xf4,0x00,0xf4,0x00,0xf4,0x00,0xf5,0x00,0xf5,0x00,
0xf5,0x00,0xf5,0x00,0xf6,0x00,0xf6,0x00,0xf6,0x00,0xf6,0x00,0xf7,0x00,0xf7,0x00,
0xf7,0x00,0xf7,0x00,0xf8,0x00,0xf8,0x00,0xf8,0x00,0xf8,0x00,0xf9,0x00,0xf9,0x00,
0xf9,0x00,0xf9,0x00,0xfa,0x00,0xfa,0x00,0xfa,0x00,0xfa,0x00,0xfb,0x00,0xfb,0x00,
0xfb,0x00,0xfb,0x00,0xfc,0x00,0xfc,0x00,0xfc,0x00,0xfc,0x00,0xfd,0x00,0xfd,0x00,
0xfd,0x00,0xfd,0x00,0xfe,0x00,0xfe,0x00,0xfe,0x00,0xfe,0x00,0xff,0x00,0xff,0x00,
0xff,0x00,0xff,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x01,0x01,0x01,0x01,
0x01,0x01,0x01,0x01,0x02,0x01,0x02,0x01,0x02,0x01,0x02,0x01,0x02,0x00,0x00,0x00,
0x1f,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x1e,
0x00,0x1f,0x00,0x20,0x00,0x21,0x00,0x21,0x00,0x21,0x00,0x27,0x00,0x00,0x00,0x45,
0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1a,0x00,0x00,0x21,0x00,
0x22,0x00,0x22,0x00,0x22,0x00,0x22,0x00,0x22,0x00,0x22,0x00,0x22,0x00,0x22,0x00,
0x22,0x00,0x22,0x00,0x23,0x00,0x23,0x00,0x23,0x00,0x23,0x00,0x23,0x00,0x23,0x00,
0x23,0x00,0x23,0x00,0x23,0x00,0x24,0x00,0x24,0x00,0x24,0x00,0x24,0x00,0x24,0x00,
0x24,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x0a,0x00,0x00,0x2a,0x00,0x2a,0x00,0x2a,0x00,0x2a,0x00,0x2b,0x00,0x2b,0x00,0x2b,
0x00,0x2b,0x00,0x2b,0x00,0x2b,0x00,0x00,0x00,0xe7,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x6b,0x00,0x00,0x2e,0x00,0x2e,0x00,0x2e,0x00,0x2e,0x00,
0x2e,0x00,0x2e,0x00,0x2e,0x00,0x2e,0x00,0x2f,0x00,0x30,0x00,0x30,0x00,0x30,0x00,
0x30,0x00,0x30,0x00,0x33,0x00,0x33,0x00,0x33,0x00,0x33,0x00,0x33,0x00,0x33,0x00,
0x33,0x00,0x33,0x00,0x34,0x00,0x35,0x00,0x35,0x00,0x35,0x00,0x35,0x00,0x36,0x00,
0x36,0x00,0x36,0x00,0x36,0x00,0x36,0x00,0x37,0x00,0x37,0x00,0x37,0x00,0x37,0x00,
0x37,0x00,0x37,0x00,0x37,0x00,0x39,0x00,0x39,0x00,0x39,0x00,0x39,0x00,0x39,0x00,
0x39,0x00,0x39,0x00,0x3a,0x00,0x3a,0x00,0x3a,0x00,0x3a,0x00,0x3a,0x00,0x3c,0x00,
0x3c,0x00,0x3c,0x00,0x3c,0x00,0x3c,0x00,0x3c,0x00,0x3c,0x00,0x3d,0x00,0x3d,0x00,
0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,
0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,0x3d,0x00,
0x3d,0x00,0x3d,0x00,0x3e,0x00,0x3e,0x00,0x3e,0x00,0x3e,0x00,0x3e,0x00,0x3e,0x00,
0x3e,0x00,0x3f,0x00,0x3f,0x00,0x3f,0x00,0x3f,0x00,0x3f,0x00,0x3f,0x00,0x3f,0x00,
0x40,0x00,0x40,0x00,0x40,0x00,0x40,0x00,0x40,0x00,0x41,0x00,0x41,0x00,0x41,0x00,
0x41,0x00,0x41,0x00,0x41,0x00,0x41,0x00,0x41,0x00,0x41,0x00,0x41,0x00,0x00,0x00,
0x37,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0x00,0x00,0x30,
0x00,0x30,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,
0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,0x00,0x31,
0x00,0x31,0x00,0x31,0x00,0x00,0x00,0x45,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x1a,0x00,0x00,0x44,0x00,0x45,0x00,0x46,0x00,0x46,0x00,0x46,0x00,
0x46,0x00,0x47,0x00,0x47,0x00,0x47,0x00,0x47,0x00,0x47,0x00,0x47,0x00,0x48,0x00,
0x49,0x00,0x49,0x00,0x49,0x00,0x49,0x00,0x49,0x00,0x49,0x00,0x49,0x00,0x4b,0x00,
0x4b,0x00,0x4b,0x00,0x4b,0x00,0x4b,0x00,0x4b,0x00,0x00,0x00,0x1d,0x00,0x01,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x47,0x00,0x47,0x00,0x47,
0x00,0x47,0x00,0x47,0x00,0x47,0x00,0x00,0x00,0x27,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x00,0x4f,0x00,0x50,0x00,0x50,0x00,0x50,0x00,
0x50,0x00,0x51,0x00,0x51,0x00,0x53,0x00,0x53,0x00,0x53,0x00,0x53,0x00,0x00,0x00,
0x3f,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x17,0x00,0x00,0x53,
0x00,0x54,0x00,0x54,0x00,0x54,0x00,0x54,0x00,0x54,0x00,0x54,0x00,0x54,0x00,0x55,
0x00,0x55,0x00,0x56,0x00,0x57,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,
0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x00,0x00,0x23,
0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x09,0x00,0x00,0x59,0x00,
0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,0x59,0x00,
0x00,0x00,0x47,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1b,0x00,
0x00,0x5d,0x00,0x5d,0x00,0x5d,0x00,0x5d,0x00,0x5e,0x00,0x5e,0x00,0x5e,0x00,0x5e,
0x00,0x5e,0x00,0x5e,0x00,0x5e,0x00,0x5e,0x00,0x5e,0x00,0x5f,0x00,0x5f,0x00,0x5f,
0x00,0x5f,0x00,0x5f,0x00,0x5f,0x00,0x5f,0x00,0x5f,0x00,0x5f,0x00,0x60,0x00,0x60,
0x00,0x60,0x00,0x60,0x00,0x60,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x63,0x00,0x63,0x00,0x63,0x00,0x63,0x00,
0x64,0x00,0x64,0x00,0x64,0x00,0x64,0x00,0x64,0x00,0x64,0x00,0x00,0x00,0x25,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x66,0x00,0x66,
0x00,0x66,0x00,0x66,0x00,0x67,0x00,0x67,0x00,0x67,0x00,0x67,0x00,0x67,0x00,0x67,
0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,
0x00,0x00,0x69,0x00,0x69,0x00,0x69,0x00,0x69,0x00,0x6a,0x00,0x6a,0x00,0x6a,0x00,
0x6a,0x00,0x6a,0x00,0x6a,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x6c,0x00,0x6c,0x00,0x6c,0x00,0x6c,0x00,0x6d,
0x00,0x6d,0x00,0x6d,0x00,0x6d,0x00,0x6d,0x00,0x6d,0x00,0x00,0x00,0x25,0x00,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x6f,0x00,0x6f,0x00,
0x6f,0x00,0x6f,0x00,0x70,0x00,0x70,0x00,0x70,0x00,0x70,0x00,0x70,0x00,0x70,0x00,
0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,
0x00,0x72,0x00,0x72,0x00,0x72,0x00,0x72,0x00,0x73,0x00,0x73,0x00,0x73,0x00,0x73,
0x00,0x73,0x00,0x73,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x0a,0x00,0x00,0x75,0x00,0x75,0x00,0x75,0x00,0x75,0x00,0x76,0x00,
0x76,0x00,0x76,0x00,0x76,0x00,0x76,0x00,0x76,0x00,0x00,0x00,0x25,0x00,0x01,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x78,0x00,0x78,0x00,0x78,
0x00,0x78,0x00,0x79,0x00,0x79,0x00,0x79,0x00,0x79,0x00,0x79,0x00,0x79,0x00,0x00,
0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,
0x7b,0x00,0x7b,0x00,0x7b,0x00,0x7b,0x00,0x7c,0x00,0x7c,0x00,0x7c,0x00,0x7c,0x00,
0x7c,0x00,0x7c,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x0a,0x00,0x00,0x7e,0x00,0x7e,0x00,0x7e,0x00,0x7e,0x00,0x7f,0x00,0x7f,
0x00,0x7f,0x00,0x7f,0x00,0x7f,0x00,0x7f,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x81,0x00,0x81,0x00,0x81,0x00,
0x81,0x00,0x82,0x00,0x82,0x00,0x82,0x00,0x82,0x00,0x82,0x00,0x82,0x00,0x00,0x00,
0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x84,
0x00,0x84,0x00,0x84,0x00,0x84,0x00,0x85,0x00,0x85,0x00,0x85,0x00,0x85,0x00,0x85,
0x00,0x85,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x0a,0x00,0x00,0x87,0x00,0x87,0x00,0x87,0x00,0x87,0x00,0x88,0x00,0x88,0x00,
0x88,0x00,0x88,0x00,0x88,0x00,0x88,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x8a,0x00,0x8a,0x00,0x8a,0x00,0x8a,
0x00,0x8b,0x00,0x8b,0x00,0x8b,0x00,0x8b,0x00,0x8b,0x00,0x8b,0x00,0x00,0x00,0x25,
0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x8d,0x00,
0x8d,0x00,0x8d,0x00,0x8d,0x00,0x8e,0x00,0x8e,0x00,0x8e,0x00,0x8e,0x00,0x8e,0x00,
0x8e,0x00,0x00,0x00,0x25,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x0a,0x00,0x00,0x90,0x00,0x90,0x00,0x90,0x00,0x90,0x00,0x91,0x00,0x91,0x00,0x91,
0x00,0x91,0x00,0x91,0x00,0x91,0x00,0x00,0x00,0x31,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x93,0x00,0x94,0x00,0x94,0x00,0x94,0x00,
0x94,0x00,0x94,0x00,0x95,0x00,0x95,0x00,0x95,0x00,0x95,0x00,0x95,0x00,0x95,0x00,
0x96,0x00,0x96,0x00,0x96,0x00,0x96,0x00,0x00,0x00,0x6d,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x2e,0x00,0x00,0x9a,0x00,0x9a,0x00,0x9a,0x00,0x9a,
0x00,0x9a,0x00,0x9a,0x00,0x9b,0x00,0x9c,0x00,0x9c,0x00,0x9c,0x00,0x9c,0x00,0x9c,
0x00,0x9c,0x00,0x9c,0x00,0x9c,0x00,0x9d,0x00,0x9d,0x00,0x9d,0x00,0x9d,0x00,0x9d,
0x00,0x9e,0x00,0x9e,0x00,0x9e,0x00,0x9e,0x00,0x9e,0x00,0x9e,0x00,0x9e,0x00,0x9f,
0x00,0x9f,0x00,0x9f,0x00,0x9f,0x00,0x9f,0x00,0x9f,0x00,0x9f,0x00,0xa0,0x00,0xa0,
0x00,0xa0,0x00,0xa0,0x00,0xa1,0x00,0xa1,0x00,0xa1,0x00,0xa1,0x00,0xa1,0x00,0xa1,
0x00,0xa1,0x00,0xa1,0x00,0x00,0x00,0x1b,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x05,0x00,0x00,0xa4,0x00,0xa5,0x00,0xa5,0x00,0xa5,0x00,0xa5,0x00,
0x00,0x00,0x8b,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3d,0x00,
0x00,0xa8,0x00,0xa9,0x00,0xa9,0x00,0xa9,0x00,0xa9,0x00,0xa9,0x00,0xa9,0x00,0xa9,
0x00,0xa9,0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,
0x00,0xab,0x00,0xab,0x00,0xab,0x00,0xab,0x00,0xab,0x00,0xab,0x00,0xab,0x00,0xab,
0x00,0xac,0x00,0xac,0x00,0xac,0x00,0xac,0x00,0xad,0x00,0xad,0x00,0xad,0x00,0xad,
0x00,0xb6,0x00,0xb6,0x00,0xb6,0x00,0xb6,0x00,0xb7,0x00,0xb7,0x00,0xb7,0x00,0xb7,
0x00,0xb8,0x00,0xb8,0x00,0xb8,0x00,0xb8,0x00,0xb8,0x00,0xb8,0x00,0xb8,0x00,0xb8,
0x00,0xba,0x00,0xba,0x00,0xba,0x00,0xba,0x00,0xbb,0x00,0xbb,0x00,0xbb,0x00,0xbb,
0x00,0xbb,0x00,0xbb,0x00,0xbb,0x00,0xbb,0x00,0xbb,0x00,0x00,0x00,0x55,0x00,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x22,0x00,0x00,0xad,0x00,0xaf,0x00,
0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,
0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,
0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,0xaf,0x00,
0xaf,0x00,0xb1,0x00,0xb1,0x00,0xb1,0x00,0xb1,0x00,0xb1,0x00,0xb1,0x00,0xb1,0x00,
0x00,0x00,0x55,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x22,0x00,
0x00,0xbb,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,
0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,
0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbd,
0x00,0xbd,0x00,0xbd,0x00,0xbd,0x00,0xbf,0x00,0xbf,0x00,0xbf,0x00,0xbf,0x00,0xbf,
0x00,0xbf,0x00,0xbf,0x00,0x00,0x00,0x3d,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x16,0x00,0x00,0xc6,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,
0xc7,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,0xc7,0x00,
0xc8,0x00,0xc8,0x00,0xc8,0x00,0xc8,0x00,0xc8,0x00,0xc8,0x00,0xc8,0x00,0xca,0x00,
0xca,0x00,0x00,0x00,0x47,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x1b,0x00,0x00,0xcd,0x00,0xce,0x00,0xce,0x00,0xce,0x00,0xcf,0x00,0xcf,0x00,0xcf,
0x00,0xcf,0x00,0xcf,0x00,0xd1,0x00,0xd1,0x00,0xd1,0x00,0xd1,0x00,0xd1,0x00,0xd1,
0x00,0xd1,0x00,0xd1,0x00,0xd1,0x00,0xd2,0x00,0xd2,0x00,0xd2,0x00,0xd2,0x00,0xd2,
0x00,0xd2,0x00,0xd2,0x00,0xd2,0x00,0xd2,0x00,0x00,0x00,0x93,0x00,0x01,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x41,0x00,0x00,0xd5,0x00,0xd6,0x00,0xd6,0x00,
0xd6,0x00,0xd6,0x00,0xd6,0x00,0xd6,0x00,0xd7,0x00,0xd7,0x00,0xd7,0x00,0xd8,0x00,
0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,
0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,0xd9,0x00,
0xda,0x00,0xda,0x00,0xda,0x00,0xda,0x00,0xda,0x00,0xda,0x00,0xda,0x00,0xda,0x00,
0xda,0x00,0xdb,0x00,0xdb,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,
0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,0xdd,0x00,
0xdd,0x00,0xdd,0x00,0xdd,0x00,0xde,0x00,0xde,0x00,0xde,0x00,0xde,0x00,0xde,0x00,
0xde,0x00,0xde,0x00,0xde,0x00,0xde,0x00,0xdf,0x00,0xdf,0x00,0x00,0x00,0x4b,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1d,0x00,0x01,0x07,0x01,0x07,
0x01,0x07,0x01,0x07,0x01,0x08,0x01,0x08,0x01,0x08,0x01,0x08,0x01,0x09,0x01,0x09,
0x01,0x09,0x01,0x09,0x01,0x0a,0x01,0x0a,0x01,0x0a,0x01,0x0a,0x01,0x0b,0x01,0x0b,
0x01,0x0b,0x01,0x0b,0x01,0x0c,0x01,0x0c,0x01,0x0c,0x01,0x0c,0x01,0x0d,0x01,0x0d,
0x01,0x0d,0x01,0x0d,0x01,0x0d,0x00,0x00,0x00,0x1b,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x01,0x10,0x01,0x10,0x01,0x10,0x01,0x10,0x01,
0x10,0x00,0x00,0x00,0x1b,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x05,0x00,0x01,0x11,0x01,0x11,0x01,0x11,0x01,0x11,0x01,0x11,0x00,0x00,0x00,0x1b,
0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x01,0x12,0x01,
0x12,0x01,0x12,0x01,0x12,0x01,0x12,0x00,0x00,0x00,0x31,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x01,0x16,0x01,0x17,0x01,0x17,0x01,0x17,
0x01,0x17,0x01,0x17,0x01,0x17,0x01,0x17,0x01,0x18,0x01,0x18,0x01,0x18,0x01,0x18,
0x01,0x18,0x01,0x18,0x01,0x18,0x01,0x18,0x00,0x00,0x00,0x31,0x00,0x01,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x01,0x1a,0x01,0x1b,0x01,0x1b,0x01,
0x1b,0x01,0x1b,0x01,0x1b,0x01,0x1b,0x01,0x1b,0x01,0x1c,0x01,0x1c,0x01,0x1c,0x01,
0x1c,0x01,0x1c,0x01,0x1c,0x01,0x1c,0x01,0x1c,0x00,0x00,0x00,0x31,0x00,0x01,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x01,0x1e,0x01,0x1f,0x01,0x1f,
0x01,0x1f,0x01,0x1f,0x01,0x1f,0x01,0x1f,0x01,0x1f,0x01,0x20,0x01,0x20,0x01,0x20,
0x01,0x20,0x01,0x20,0x01,0x20,0x01,0x20,0x01,0x20,0x00,0x00,0x00,0x31,0x00,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x01,0x22,0x01,0x23,0x01,
0x23,0x01,0x23,0x01,0x23,0x01,0x23,0x01,0x23,0x01,0x23,0x01,0x24,0x01,0x24,0x01,
0x24,0x01,0x24,0x01,0x24,0x01,0x24,0x01,0x24,0x01,0x24,0x00,0x00,0x00,0x35,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x01,0x26,0x01,0x27,
0x01,0x27,0x01,0x27,0x01,0x27,0x01,0x27,0x01,0x27,0x01,0x27,0x01,0x27,0x01,0x28,
0x01,0x28,0x01,0x28,0x01,0x28,0x01,0x28,0x01,0x28,0x01,0x28,0x01,0x28,0x01,0x28,
0x00,0x00,0x00,0x35,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,
0x00,0x01,0x2a,0x01,0x2b,0x01,0x2b,0x01,0x2b,0x01,0x2b,0x01,0x2b,0x01,0x2b,0x01,
0x2b,0x01,0x2b,0x01,0x2c,0x01,0x2c,0x01,0x2c,0x01,0x2c,0x01,0x2c,0x01,0x2c,0x01,
0x2c,0x01,0x2c,0x01,0x2c,0x00,0x00,0x00,0x35,0x00,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x12,0x00,0x01,0x2e,0x01,0x2f,0x01,0x2f,0x01,0x2f,0x01,0x2f,
0x01,0x2f,0x01,0x2f,0x01,0x2f,0x01,0x2f,0x01,0x30,0x01,0x30,0x01,0x30,0x01,0x30,
0x01,0x30,0x01,0x30,0x01,0x30,0x01,0x30,0x01,0x30,0x00,0x00,0x00,0x35,0x00,0x01,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x01,0x32,0x01,0x33,0x01,
0x33,0x01,0x33,0x01,0x33,0x01,0x33,0x01,0x33,0x01,0x33,0x01,0x33,0x01,0x34,0x01,
0x34,0x01,0x34,0x01,0x34,0x01,0x34,0x01,0x34,0x01,0x34,0x01,0x34,0x01,0x34,0x00,
0x00,0x00,0x35,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,
0x01,0x36,0x01,0x37,0x01,0x37,0x01,0x37,0x01,0x37,0x01,0x37,0x01,0x37,0x01,0x37,
0x01,0x37,0x01,0x38,0x01,0x38,0x01,0x38,0x01,0x38,0x01,0x38,0x01,0x38,0x01,0x38,
0x01,0x38,0x01,0x38,0x00,0x00,0x00,0x3b,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x15,0x00,0x01,0x3a,0x01,0x3b,0x01,0x3b,0x01,0x3b,0x01,0x3b,0x01,
0x3b,0x01,0x3b,0x01,0x3b,0x01,0x3c,0x01,0x3c,0x01,0x3c,0x01,0x3d,0x01,0x3d,0x01,
0x3d,0x01,0x3d,0x01,0x3d,0x01,0x3d,0x01,0x3d,0x01,0x3d,0x01,0x3d,0x01,0x3d,0x00,
0x00,0x00,0x41,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,
0x01,0x40,0x01,0x41,0x01,0x41,0x01,0x41,0x01,0x41,0x01,0x41,0x01,0x41,0x01,0x41,
0x01,0x41,0x01,0x41,0x01,0x41,0x01,0x41,0x01,0x42,0x01,0x42,0x01,0x42,0x01,0x42,
0x01,0x42,0x01,0x42,0x01,0x42,0x01,0x42,0x01,0x42,0x01,0x42,0x01,0x42,0x01,0x42,
0x00,0x00,0x00,0x39,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,
0x00,0x01,0x44,0x01,0x45,0x01,0x45,0x01,0x45,0x01,0x45,0x01,0x45,0x01,0x45,0x01,
0x45,0x01,0x45,0x01,0x45,0x01,0x46,0x01,0x46,0x01,0x46,0x01,0x46,0x01,0x46,0x01,
0x46,0x01,0x46,0x01,0x46,0x01,0x46,0x01,0x46,0x00,0x00,0x00,0x35,0x00,0x01,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x01,0x48,0x01,0x49,0x01,0x49,
0x01,0x49,0x01,0x49,0x01,0x49,0x01,0x49,0x01,0x49,0x01,0x49,0x01,0x4a,0x01,0x4a,
0x01,0x4a,0x01,0x4a,0x01,0x4a,0x01,0x4a,0x01,0x4a,0x01,0x4a,0x01,0x4a,0x00,0x00,
0x00,0x39,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x01,
0x4c,0x01,0x4d,0x01,0x4d,0x01,0x4d,0x01,0x4d,0x01,0x4d,0x01,0x4d,0x01,0x4d,0x01,
0x4d,0x01,0x4d,0x01,0x4e,0x01,0x4e,0x01,0x4e,0x01,0x4e,0x01,0x4e,0x01,0x4e,0x01,
0x4e,0x01,0x4e,0x01,0x4e,0x01,0x4e,0x00,0x00,0x00,0x39,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x01,0x50,0x01,0x51,0x01,0x51,0x01,0x51,
0x01,0x51,0x01,0x51,0x01,0x51,0x01,0x51,0x01,0x51,0x01,0x51,0x01,0x52,0x01,0x52,
0x01,0x52,0x01,0x52,0x01,0x52,0x01,0x52,0x01,0x52,0x01,0x52,0x01,0x52,0x01,0x52,
0x00,0x00,0x00,0x73,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x31,
0x00,0x01,0x58,0x01,0x58,0x01,0x58,0x01,0x58,0x01,0x58,0x01,0x59,0x01,0x59,0x01,
0x59,0x01,0x59,0x01,0x5b,0x01,0x5b,0x01,0x5b,0x01,0x5b,0x01,0x5b,0x01,0x5b,0x01,
0x5b,0x01,0x5b,0x01,0x5c,0x01,0x5c,0x01,0x5c,0x01,0x5d,0x01,0x5d,0x01,0x5d,0x01,
0x5d,0x01,0x5d,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,
0x5f,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,0x5f,0x01,0x61,0x01,0x61,0x01,
0x61,0x01,0x61,0x01,0x61,0x01,0x61,0x01,0x61,0x01,0x64,0x01,0x64,0x01,0x64,0x01,
0x64,0x01,0x64,0x4c,0x56,0x41,0x52,0x00,0x00,0x03,0x50,0x00,0x00,0x00,0x2d,0x00,
0x08,0x68,0x61,0x6e,0x64,0x6c,0x65,0x72,0x73,0x00,0x04,0x61,0x72,0x67,0x73,0x00,
0x03,0x72,0x65,0x74,0x00,0x01,0x69,0x00,0x04,0x6e,0x61,0x6d,0x65,0x00,0x05,0x70,
0x61,0x72,0x61,0x6d,0x00,0x04,0x6d,0x6f,0x64,0x65,0x00,0x08,0x73,0x68,0x6f,0x72,
0x74,0x63,0x75,0x74,0x00,0x05,0x62,0x6c,0x6f,0x63,0x6b,0x00,0x03,0x69,0x64,0x78,
0x00,0x02,0x69,0x69,0x00,0x05,0x77,0x68,0x69,0x63,0x68,0x00,0x03,0x63,0x6d,0x64,
0x00,0x06,0x6d,0x65,0x74,0x68,0x6f,0x64,0x00,0x03,0x61,0x72,0x67,0x00,0x03,0x6f,
0x62,0x6a,0x00,0x05,0x6e,0x61,0x6d,0x65,0x73,0x00,0x03,0x74,0x62,0x6c,0x00,0x01,
0x76,0x00,0x05,0x65,0x76,0x65,0x6e,0x74,0x00,0x06,0x72,0x65,0x6d,0x6f,0x76,0x65,
0x00,0x03,0x62,0x6c,0x6b,0x00,0x01,0x73,0x00,0x04,0x6c,0x69,0x73,0x74,0x00,0x05,
0x73,0x74,0x61,0x72,0x74,0x00,0x02,0x74,0x70,0x00,0x03,0x73,0x65,0x70,0x00,0x04,
0x70,0x61,0x6e,0x65,0x00,0x04,0x70,0x61,0x74,0x68,0x00,0x04,0x66,0x69,0x6c,0x65,
0x00,0x01,0x65,0x00,0x08,0x6c,0x69,0x6e,0x65,0x5f,0x70,0x6f,0x73,0x00,0x06,0x6c,
0x69,0x6e,0x65,0x6e,0x6f,0x00,0x04,0x65,0x6e,0x64,0x6c,0x00,0x02,0x63,0x68,0x00,
0x0e,0x65,0x64,0x69,0x74,0x6f,0x72,0x5f,0x66,0x6f,0x63,0x75,0x73,0x65,0x64,0x00,
0x03,0x6b,0x65,0x79,0x00,0x05,0x73,0x68,0x69,0x66,0x74,0x00,0x04,0x63,0x74,0x72,
0x6c,0x00,0x03,0x61,0x6c,0x74,0x00,0x03,0x70,0x6f,0x73,0x00,0x03,0x73,0x74,0x72,
0x00,0x07,0x63,0x6f,0x6e,0x74,0x72,0x6f,0x6c,0x00,0x06,0x63,0x68,0x61,0x6e,0x67,
0x65,0x00,0x0c,0x63,0x75,0x72,0x72,0x65,0x6e,0x74,0x5f,0x66,0x69,0x6c,0x65,0x00,
0x00,0x00,0x01,0x00,0x01,0x00,0x02,0xff,0xff,0x00,0x00,0x00,0x02,0x00,0x04,0x00,
0x03,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x04,0x00,0x01,0x00,0x05,0x00,0x02,0xff,
0xff,0x00,0x00,0x00,0x04,0x00,0x01,0x00,0x06,0x00,0x02,0x00,0x07,0x00,0x03,0x00,
0x05,0x00,0x04,0x00,0x08,0x00,0x05,0x00,0x09,0x00,0x06,0x00,0x0a,0x00,0x07,0x00,
0x0b,0x00,0x08,0x00,0x0c,0x00,0x09,0x00,0x0d,0x00,0x01,0x00,0x0e,0x00,0x02,0xff,
0xff,0x00,0x00,0x00,0x0f,0x00,0x04,0x00,0x10,0x00,0x05,0x00,0x04,0x00,0x01,0xff,
0xff,0x00,0x00,0x00,0x11,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x12,0x00,0x01,0xff,
0xff,0x00,0x00,0x00,0x04,0x00,0x03,0x00,0x0c,0x00,0x04,0x00,0x06,0x00,0x05,0x00,
0x07,0x00,0x06,0x00,0x0e,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x13,0x00,0x01,0x00,
0x14,0x00,0x02,0x00,0x15,0x00,0x03,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x14,0x00,0x01,0x00,0x15,0x00,0x02,0x00,0x16,0x00,0x01,0x00,0x15,0x00,0x02,0x00,
0x17,0x00,0x01,0x00,0x18,0x00,0x02,0x00,0x19,0x00,0x03,0x00,0x15,0x00,0x04,0x00,
0x1a,0x00,0x05,0x00,0x16,0x00,0x06,0x00,0x1b,0x00,0x07,0xff,0xff,0x00,0x00,0xff,
0xff,0x00,0x00,0x00,0x1c,0x00,0x02,0x00,0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,
0x1e,0x00,0x03,0x00,0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x1e,0x00,0x03,0x00,
0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x1b,0x00,0x01,0xff,0xff,0x00,0x00,0x00,
0x1f,0x00,0x03,0x00,0x20,0x00,0x04,0x00,0x21,0x00,0x05,0x00,0x22,0x00,0x01,0xff,
0xff,0x00,0x00,0x00,0x23,0x00,0x03,0x00,0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,
0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0xff,
0xff,0x00,0x00,0xff,0xff,0x00,0x00,0xff,0xff,0x00,0x00,0xff,0xff,0x00,0x00,0x00,
0x22,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,
0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0x00,
0x1d,0x00,0x01,0xff,0xff,0x00,0x00,0xff,0xff,0x00,0x00,0x00,0x24,0x00,0x01,0x00,
0x25,0x00,0x02,0x00,0x26,0x00,0x03,0x00,0x27,0x00,0x04,0xff,0xff,0x00,0x00,0x00,
0x28,0x00,0x01,0x00,0x16,0x00,0x02,0xff,0xff,0x00,0x00,0x00,0x1d,0x00,0x01,0xff,
0xff,0x00,0x00,0x00,0x19,0x00,0x01,0x00,0x29,0x00,0x02,0xff,0xff,0x00,0x00,0x00,
0x2a,0x00,0x01,0x00,0x2b,0x00,0x02,0xff,0xff,0x00,0x00,0x00,0x2c,0x00,0x01,0x00,
0x1e,0x00,0x02,0x45,0x4e,0x44,0x00,0x00,0x00,0x00,0x08,
};
| sdottaka/mruby-bin-scite-mruby | tools/scite/mrblib/mrblib_extman.c | C | mit | 89,318 |
/********************************************************************
Software License Agreement:
The software supplied herewith by Microchip Technology Incorporated
(the "Company") for its PIC(R) Microcontroller is intended and
supplied to you, the Company's customer, for use solely and
exclusively on Microchip PIC Microcontroller products. The
software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved.
Any use in violation of the foregoing restrictions may subject the
user to criminal sanctions under applicable laws, as well as to
civil liability for the breach of the terms and conditions of this
license.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*******************************************************************/
#include <xc.h>
#include <system.h>
#include <system_config.h>
#include <usb/usb.h>
// PIC24FJ64GB002 Configuration Bit Settings
#include <xc.h>
// CONFIG4
#pragma config DSWDTPS = DSWDTPS3 // DSWDT Postscale Select (1:128 (132 ms))
#pragma config DSWDTOSC = LPRC // Deep Sleep Watchdog Timer Oscillator Select (DSWDT uses Low Power RC Oscillator (LPRC))
#pragma config RTCOSC = SOSC // RTCC Reference Oscillator Select (RTCC uses Secondary Oscillator (SOSC))
#pragma config DSBOREN = OFF // Deep Sleep BOR Enable bit (BOR disabled in Deep Sleep)
#pragma config DSWDTEN = OFF // Deep Sleep Watchdog Timer (DSWDT disabled)
// CONFIG3
#pragma config WPFP = WPFP0 // Write Protection Flash Page Segment Boundary (Page 0 (0x0))
#pragma config SOSCSEL = IO // Secondary Oscillator Pin Mode Select (SOSC pins have digital I/O functions (RA4, RB4))
#pragma config WUTSEL = LEG // Voltage Regulator Wake-up Time Select (Default regulator start-up time used)
#pragma config WPDIS = WPDIS // Segment Write Protection Disable (Segmented code protection disabled)
#pragma config WPCFG = WPCFGDIS // Write Protect Configuration Page Select (Last page and Flash Configuration words are unprotected)
#pragma config WPEND = WPENDMEM // Segment Write Protection End Page Select (Write Protect from WPFP to the last page of memory)
// CONFIG2
#pragma config POSCMOD = XT // Primary Oscillator Select (XT Oscillator mode selected)
#pragma config I2C1SEL = PRI // I2C1 Pin Select bit (Use default SCL1/SDA1 pins for I2C1 )
#pragma config IOL1WAY = OFF // IOLOCK One-Way Set Enable (The IOLOCK bit can be set and cleared using the unlock sequence)
#pragma config OSCIOFNC = ON // OSCO Pin Configuration (OSCO pin functions as port I/O (RA3))
#pragma config FCKSM = CSDCMD // Clock Switching and Fail-Safe Clock Monitor (Sw Disabled, Mon Disabled)
#pragma config FNOSC = PRIPLL // Initial Oscillator Select (Primary Oscillator with PLL module (XTPLL, HSPLL, ECPLL))
#pragma config PLL96MHZ = ON // 96MHz PLL Startup Select (96 MHz PLL Startup is enabled automatically on start-up)
#pragma config PLLDIV = DIV2 // USB 96 MHz PLL Prescaler Select (Oscillator input divided by 2 (8 MHz input))
#pragma config IESO = OFF // Internal External Switchover (IESO mode (Two-Speed Start-up) disabled)
// CONFIG1
#pragma config WDTPS = PS1 // Watchdog Timer Postscaler (1:1)
#pragma config FWPSA = PR32 // WDT Prescaler (Prescaler ratio of 1:32)
#pragma config WINDIS = OFF // Windowed WDT (Standard Watchdog Timer enabled,(Windowed-mode is disabled))
#pragma config FWDTEN = OFF // Watchdog Timer (Watchdog Timer is disabled)
#pragma config ICS = PGx1 // Emulator Pin Placement Select bits (Emulator functions are shared with PGEC1/PGED1)
#pragma config GWRP = OFF // General Segment Write Protect (Writes to program memory are allowed)
#pragma config GCP = OFF // General Segment Code Protect (Code protection is disabled)
#pragma config JTAGEN = OFF // JTAG Port Enable (JTAG port is disabled)
/*********************************************************************
* Function: void SYSTEM_Initialize( SYSTEM_STATE state )
*
* Overview: Initializes the system.
*
* PreCondition: None
*
* Input: SYSTEM_STATE - the state to initialize the system into
*
* Output: None
*
********************************************************************/
void SYSTEM_Initialize( SYSTEM_STATE state )
{
    //On the PIC24FJ64GB004 Family of USB microcontrollers, the PLL will not power up and be enabled
    //by default, even if a PLL enabled oscillator configuration is selected (such as HS+PLL).
    //This allows the device to power up at a lower initial operating frequency, which can be
    //advantageous when powered from a source which is not guaranteed to be adequate for 32MHz
    //operation. On these devices, user firmware needs to manually set the CLKDIV<PLLEN> bit to
    //power up the PLL.
    {
        unsigned int pll_startup_counter = 600;
        CLKDIVbits.PLLEN = 1;
        //crude busy-wait for the PLL to start; no lock-status bit is polled here
        while(pll_startup_counter--);
    }

    switch(state)
    {
        case SYSTEM_STATE_USB_HOST:
            PRINT_SetConfiguration(PRINT_CONFIGURATION_UART);
            break;

        case SYSTEM_STATE_USB_HOST_HID_KEYBOARD:
            LED_Enable(LED_USB_HOST_HID_KEYBOARD_DEVICE_READY);

            //also setup UART console and a 1 ms tick for this mode
            PRINT_SetConfiguration(PRINT_CONFIGURATION_UART);
            TIMER_SetConfiguration(TIMER_CONFIGURATION_1MS);
            break;

        default:
            //unknown state: nothing to configure
            break;
    }
}
// USB1 interrupt service routine: delegates all USB events to the Microchip
// USB host stack.  auto_psv preserves the PSV page so the ISR can safely
// access constants in program space.
void __attribute__((interrupt,auto_psv)) _USB1Interrupt()
{
    USB_HostInterruptHandler();
}
| timwuu/PK3SP24 | v2014_07_22/apps/usb/host/hid_bridgeHost/firmware/src/system_config/exp16/pic24fj64gb002_pim/system.c | C | mit | 6,072 |
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h> /* Definition of AT_* constants */
#ifndef _MSC_VER
#include <unistd.h>
#include <dirent.h>
#else
#pragma warning(disable:4996)
#endif
#include "logging.h"
#include "config.h"
#include "oracle.h"
#include "tempfs.h"
#include "util.h"
#include "query.h"
/*
 * Refresh cache metadata for a single database object.
 *
 * Converts the Oracle type to its filesystem representation, derives the
 * cache filename for [schema].[object] and, when the cached copy already
 * matches last_ddl_time, marks it as verified by the current mount so that
 * dbr_delete_obsolete() will not remove it.
 *
 * Returns EXIT_SUCCESS or EXIT_FAILURE.
 */
static int dbr_refresh_object(const char *schema,
                              const char *ora_type,
                              const char *object,
                              time_t last_ddl_time) {

    int retval = EXIT_FAILURE;
    char object_with_suffix[300];
    char *fs_type = NULL;
    char *suffix = NULL;
    char *fname = NULL;

    // convert oracle type to filesystem type
    fs_type = strdup(ora_type);
    if (fs_type == NULL) {
        logmsg(LOG_ERROR, "dbr_refresh_object(): unable to allocate memory for ora_type");
        goto cleanup;
    }
    utl_ora2fstype(&fs_type);

    // get suffix based on type
    if (str_suffix(&suffix, ora_type) != EXIT_SUCCESS) {
        logmsg(LOG_ERROR, "dbr_refresh_object(): unable to determine file suffix");
        goto cleanup;
    }
    snprintf(object_with_suffix, sizeof(object_with_suffix), "%s%s", object, suffix);

    // get cache filename
    if (qry_object_fname(schema, fs_type, object_with_suffix, &fname) != EXIT_SUCCESS) {
        logmsg(LOG_ERROR, "dbr_refresh_object(): unable to determine cache filename for [%s] [%s].[%s]", ora_type, schema, object_with_suffix);
        goto cleanup;
    }

    // if cache file is already up2date
    if (tfs_validate2(fname, last_ddl_time) == EXIT_SUCCESS) {
        // then mark it as verified by this mount
        if (tfs_setldt(fname, last_ddl_time) != EXIT_SUCCESS) {
            logmsg(LOG_ERROR, "dbr_refresh_object(): unable to mark [%s] [%s].[%s] as verified by this mount.", ora_type, schema, object);
            goto cleanup;
        }
    }

    retval = EXIT_SUCCESS;

cleanup:
    // free(NULL) is a no-op, so no guards are needed
    free(fname);
    free(suffix);
    free(fs_type);
    return retval;
}
/*
 * Delete cache files (*.tmp in the configured temp directory) whose stamp
 * does not match the current mount's pid + start time, i.e. files that no
 * dbr_refresh_object() call of this mount has verified.
 *
 * Returns EXIT_SUCCESS or EXIT_FAILURE.  Not implemented on Windows.
 */
static int dbr_delete_obsolete() {
#ifdef _MSC_VER
    logmsg(LOG_ERROR, "dbr_delete_obsolete() - this function is not yet implemented for Windows platform!");
    return EXIT_FAILURE;
#else
    char cache_fn[4096];

    DIR *dir = opendir(g_conf._temppath);
    if (dir == NULL) {
        logmsg(LOG_ERROR, "dbr_delete_obsolete() - unable to open directory: %d - %s", errno, strerror(errno));
        return EXIT_FAILURE;
    }

    struct dirent *dir_entry = NULL;
    while ((dir_entry = readdir(dir)) != NULL) {
        // NOTE(review): d_type is DT_UNKNOWN on some filesystems, which would
        // skip real files here — confirm the temp path is always on a
        // filesystem that reports d_type.
        if (dir_entry->d_type != DT_REG)
            continue;

        // only names ending in ".tmp" belong to the cache
        size_t name_len = strlen(dir_entry->d_name);
        if (name_len < 5)
            continue;

        char *suffix = dir_entry->d_name + name_len - 4;
        if (strcmp(suffix, ".tmp") != 0)
            continue;

        snprintf(cache_fn, sizeof(cache_fn), "%s/%s", g_conf._temppath, dir_entry->d_name);

        time_t last_ddl_time = 0;
        time_t mount_stamp = 0;
        pid_t mount_pid = 0;
        if (tfs_getldt(cache_fn, &last_ddl_time, &mount_pid, &mount_stamp) != EXIT_SUCCESS) {
            logmsg(LOG_ERROR, "dbr_delete_obsolete() - tfs_getldt returned error");
            closedir(dir);
            return EXIT_FAILURE;
        }

        // stamped by a different mount (pid or start time) => obsolete
        if ((mount_pid != g_conf._mount_pid) || (mount_stamp != g_conf._mount_stamp)) {
            tfs_rmfile(cache_fn);
            logmsg(LOG_DEBUG, "dbr_delete_obsolete() - removed obsolete cache file [%s]", cache_fn);
        }
    }
    closedir(dir);
    return EXIT_SUCCESS;
#endif
}
/*
 * Walk all non-generated schema objects of the supported types and refresh
 * the local cache entry for each one (via dbr_refresh_object), then delete
 * cache files that were not verified by the current mount.
 *
 * NOTE(review): the ORA_STMT_* macros appear to declare locals, bind column
 * defines and jump to the dbr_refresh_state_cleanup label on error — confirm
 * against oracle.h before restructuring this function.
 */
int dbr_refresh_cache() {
    int retval = EXIT_SUCCESS;

    const char *query =
"select o.owner, o.object_type, o.object_name, \
to_char(o.last_ddl_time, 'yyyy-mm-dd hh24:mi:ss') as last_ddl_time\
 from all_objects o\
 where generated='N'\
 and (o.object_type != 'TYPE' or o.subobject_name IS NULL)\
 and object_type IN (\
'TABLE',\
'VIEW',\
'PROCEDURE',\
'FUNCTION',\
'PACKAGE',\
'PACKAGE BODY',\
'TRIGGER',\
'TYPE',\
'TYPE BODY',\
'JAVA SOURCE')";

    ORA_STMT_PREPARE(dbr_refresh_state);
    ORA_STMT_DEFINE_STR_I(dbr_refresh_state, 1, schema, 300);
    ORA_STMT_DEFINE_STR_I(dbr_refresh_state, 2, type, 300);
    ORA_STMT_DEFINE_STR_I(dbr_refresh_state, 3, object, 300);
    ORA_STMT_DEFINE_STR_I(dbr_refresh_state, 4, last_ddl_time, 25);
    ORA_STMT_EXECUTE(dbr_refresh_state, 0);

    // fallback values stand in for NULL columns; the sentinel date marks an
    // unknown last_ddl_time
    while (ORA_STMT_FETCH) {
        dbr_refresh_object(
            ORA_NVL(schema, "_UNKNOWN_SCHEMA_"),
            ORA_NVL(type, "_UNKNOWN_TYPE_"),
            ORA_NVL(object, "_UNKNOWN_OBJECT_"),
            utl_str2time(ORA_NVL(last_ddl_time, "1990-01-01 03:00:01")));
    }

    dbr_delete_obsolete();

dbr_refresh_state_cleanup:
    ORA_STMT_FREE;
    return retval;
}
| usrecnik/ddlfs | src/dbro_refresh.c | C | mit | 5,152 |
/*****************************************************************
* syscall.c
* adapted from MIT xv6 by Zhiyi Huang, hzy@cs.otago.ac.nz
* University of Otago
*
********************************************************************/
#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "arm.h"
#include "syscall.h"
// User code makes a system call with INT T_SYSCALL.
// System call number in %eax.
// Arguments on the stack, from the user call to the C
// library system call function. The saved user %esp points
// to a saved program counter, and then the first argument.
// Fetch the int at addr from the current process.
// Fetch a 32-bit int at user virtual address addr from the current process.
// Stores the value through ip and returns 0, or returns -1 when the word is
// not entirely inside the process image.
int
fetchint(uint addr, int *ip)
{
  uint top = curr_proc->sz;

  if(addr >= top || addr + 4 > top)
    return -1;
  *ip = *(int*)addr;
  return 0;
}
// Fetch the nul-terminated string at addr from the current process.
// Doesn't actually copy the string - just sets *pp to point at it.
// Returns length of string, not including nul.
// Fetch the nul-terminated string at user address addr from the current
// process.  Does not copy: *pp is pointed directly at the string.  Returns
// its length (excluding the nul), or -1 if no terminator exists inside the
// process image.
int
fetchstr(uint addr, char **pp)
{
  char *cur, *limit;

  if(addr >= curr_proc->sz)
    return -1;
  *pp = (char*)addr;
  limit = (char*)curr_proc->sz;
  cur = *pp;
  while(cur < limit){
    if(*cur == 0)
      return cur - *pp;
    cur++;
  }
  return -1;
}
// Fetch the nth 32-bit system call argument.
int
argint(int n, int *ip)
{
  // Arguments are read relative to the saved user stack pointer, 4 bytes
  // apart.  NOTE(review): the file-header comment still describes the x86
  // layout (saved PC before arg 0); here arg n is at sp + 4*n — confirm
  // against the user-side syscall stubs.
  return fetchint(curr_proc->tf->sp + 4*n, ip);
}
// Fetch the nth word-sized system call argument as a pointer
// to a block of memory of size n bytes. Check that the pointer
// lies within the process address space.
// Fetch the nth word-sized system call argument as a pointer to a block of
// memory of size bytes.  Checks that the whole block lies within the
// process address space.  Returns 0 on success, -1 on a bad argument.
int
argptr(int n, char **pp, int size)
{
  int i;

  if(argint(n, &i) < 0)
    return -1;
  // Reject negative sizes explicitly: (uint)i + size would wrap and slip
  // past the bounds check, handing the kernel an out-of-range pointer.
  // (Upstream xv6 carries the same fix.)
  if(size < 0 || (uint)i >= curr_proc->sz || (uint)i+size > curr_proc->sz)
    return -1;
  *pp = (char*)i;
  return 0;
}
// Fetch the nth word-sized system call argument as a string pointer.
// Check that the pointer is valid and the string is nul-terminated.
// (There is no shared writable memory, so the string can't change
// between this check and being used by the kernel.)
// Fetch the nth system call argument as a user string pointer.  The pointer
// is validated and the string checked for nul-termination by fetchstr.
// (User memory is not writable by other threads here, so the string cannot
// change between this check and its use by the kernel.)
int
argstr(int n, char **pp)
{
  int uaddr;

  if(argint(n, &uaddr) < 0)
    return -1;
  return fetchstr(uaddr, pp);
}
extern int sys_chdir(void);
extern int sys_close(void);
extern int sys_dup(void);
extern int sys_exec(void);
extern int sys_exit(void);
extern int sys_fork(void);
extern int sys_fstat(void);
extern int sys_getpid(void);
extern int sys_kill(void);
extern int sys_link(void);
extern int sys_mkdir(void);
extern int sys_mknod(void);
extern int sys_open(void);
extern int sys_pipe(void);
extern int sys_read(void);
extern int sys_sbrk(void);
extern int sys_sleep(void);
extern int sys_unlink(void);
extern int sys_wait(void);
extern int sys_write(void);
extern int sys_uptime(void);
// Dispatch table mapping syscall numbers (SYS_* from syscall.h) to their
// handlers via designated array initializers; any unassigned slot is NULL
// and is rejected by syscall() below.
static int (*syscalls[])(void) = {
[SYS_fork] sys_fork,
[SYS_exit] sys_exit,
[SYS_wait] sys_wait,
[SYS_pipe] sys_pipe,
[SYS_read] sys_read,
[SYS_kill] sys_kill,
[SYS_exec] sys_exec,
[SYS_fstat] sys_fstat,
[SYS_chdir] sys_chdir,
[SYS_dup] sys_dup,
[SYS_getpid] sys_getpid,
[SYS_sbrk] sys_sbrk,
[SYS_sleep] sys_sleep,
[SYS_uptime] sys_uptime,
[SYS_open] sys_open,
[SYS_write] sys_write,
[SYS_mknod] sys_mknod,
[SYS_unlink] sys_unlink,
[SYS_link] sys_link,
[SYS_mkdir] sys_mkdir,
[SYS_close] sys_close,
};
// Dispatch the system call whose number arrives in the trapframe's r0 and
// store the handler's return value back into r0 for the user program.
void
syscall(void)
{
  int num;

  num = curr_proc->tf->r0;
  if(num > 0 && num < NELEM(syscalls) && syscalls[num]) {
//    cprintf("\n%d %s: sys call %d syscall address %x\n",
//            curr_proc->pid, curr_proc->name, num, syscalls[num]);
    if(num == SYS_exec) {
      // exec replaces the user image, so r0 is only written on failure.
      // NOTE(review): presumably a successful exec has already set up the
      // new image's registers — confirm against sys_exec/exec().
      if(syscalls[num]() == -1) curr_proc->tf->r0 = -1;
    } else curr_proc->tf->r0 = syscalls[num]();
  } else {
    // unknown or unimplemented syscall number: report and fail the call
    cprintf("%d %s: unknown sys call %d\n",
            curr_proc->pid, curr_proc->name, num);
    curr_proc->tf->r0 = -1;
  }
}
| fosler/xv6-rpi-port | source/syscall.c | C | mit | 3,791 |
/*******************************************************************************
Copyright © 2016, STMicroelectronics International N.V.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of STMicroelectronics nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS ARE DISCLAIMED.
IN NO EVENT SHALL STMICROELECTRONICS INTERNATIONAL N.V. BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
#include "vl53l0x_api.h"
#include "vl53l0x_api_core.h"
#include "vl53l0x_api_strings.h"
#ifndef __KERNEL__
#include <stdlib.h>
#endif
#define LOG_FUNCTION_START(fmt, ...) \
_LOG_FUNCTION_START(TRACE_MODULE_API, fmt, ##__VA_ARGS__)
#define LOG_FUNCTION_END(status, ...) \
_LOG_FUNCTION_END(TRACE_MODULE_API, status, ##__VA_ARGS__)
#define LOG_FUNCTION_END_FMT(status, fmt, ...) \
_LOG_FUNCTION_END_FMT(TRACE_MODULE_API, status, fmt, ##__VA_ARGS__)
/*
 * Read module identification from the device (info level 2) and report the
 * silicon revision plus the product id string.  A ModuleId of 0 indicates
 * an unprogrammed module: revision 0 and an empty ProductId are returned.
 */
VL53L0X_Error VL53L0X_check_part_used(VL53L0X_DEV Dev,
		uint8_t *Revision,
		VL53L0X_DeviceInfo_t *pVL53L0X_DeviceInfo) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;
	uint8_t ModuleIdInt;
	char *ProductId_tmp;

	LOG_FUNCTION_START("");

	Status = VL53L0X_get_info_from_device(Dev, 2);

	if (Status == VL53L0X_ERROR_NONE) {
		ModuleIdInt = VL53L0X_GETDEVICESPECIFICPARAMETER(Dev, ModuleId);

	if (ModuleIdInt == 0) {
		*Revision = 0;
		VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->ProductId, "");
	} else {
		*Revision = VL53L0X_GETDEVICESPECIFICPARAMETER(Dev, Revision);
		ProductId_tmp = VL53L0X_GETDEVICESPECIFICPARAMETER(Dev,
			ProductId);
		VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->ProductId, ProductId_tmp);
	}
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
/*
 * Populate pVL53L0X_DeviceInfo: device name (chosen from the silicon
 * revision: TS0/TS1/TS2 engineering samples or ES1 production), type
 * string, product type register and the major/minor product revision.
 */
VL53L0X_Error VL53L0X_get_device_info(VL53L0X_DEV Dev,
		VL53L0X_DeviceInfo_t *pVL53L0X_DeviceInfo) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;
	uint8_t revision_id;
	uint8_t Revision;

	Status = VL53L0X_check_part_used(Dev, &Revision, pVL53L0X_DeviceInfo);

	if (Status == VL53L0X_ERROR_NONE) {
		// revision -> marketing name mapping (ranges per ST driver)
		if (Revision == 0) {
			VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->Name,
					VL53L0X_STRING_DEVICE_INFO_NAME_TS0);
		} else if ((Revision <= 34) && (Revision != 32)) {
			VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->Name,
					VL53L0X_STRING_DEVICE_INFO_NAME_TS1);
		} else if (Revision < 39) {
			VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->Name,
					VL53L0X_STRING_DEVICE_INFO_NAME_TS2);
		} else {
			VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->Name,
					VL53L0X_STRING_DEVICE_INFO_NAME_ES1);
		}

		VL53L0X_COPYSTRING(pVL53L0X_DeviceInfo->Type,
				VL53L0X_STRING_DEVICE_INFO_TYPE);
	}

	if (Status == VL53L0X_ERROR_NONE) {
		Status = VL53L0X_RdByte(Dev, VL53L0X_REG_IDENTIFICATION_MODEL_ID,
				&pVL53L0X_DeviceInfo->ProductType);
	}

	if (Status == VL53L0X_ERROR_NONE) {
		Status = VL53L0X_RdByte(Dev,
				VL53L0X_REG_IDENTIFICATION_REVISION_ID,
				&revision_id);
		// major revision is fixed at 1; minor lives in the high nibble
		pVL53L0X_DeviceInfo->ProductRevisionMajor = 1;
		pVL53L0X_DeviceInfo->ProductRevisionMinor =
				(revision_id & 0xF0) >> 4;
	}

	return Status;
}
/*
 * Translate a VL53L0X device error code into its human-readable string.
 * Unrecognized codes yield VL53L0X_STRING_UNKNOW_ERROR_CODE.  The caller's
 * buffer must be large enough for the longest message.
 */
VL53L0X_Error VL53L0X_get_device_error_string(VL53L0X_DeviceError ErrorCode,
		char *pDeviceErrorString) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;

	LOG_FUNCTION_START("");

	switch (ErrorCode) {
	case VL53L0X_DEVICEERROR_NONE:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_NONE);
		break;
	case VL53L0X_DEVICEERROR_VCSELCONTINUITYTESTFAILURE:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_VCSELCONTINUITYTESTFAILURE);
		break;
	case VL53L0X_DEVICEERROR_VCSELWATCHDOGTESTFAILURE:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_VCSELWATCHDOGTESTFAILURE);
		break;
	case VL53L0X_DEVICEERROR_NOVHVVALUEFOUND:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_NOVHVVALUEFOUND);
		break;
	case VL53L0X_DEVICEERROR_MSRCNOTARGET:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_MSRCNOTARGET);
		break;
	case VL53L0X_DEVICEERROR_SNRCHECK:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_SNRCHECK);
		break;
	case VL53L0X_DEVICEERROR_RANGEPHASECHECK:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_RANGEPHASECHECK);
		break;
	case VL53L0X_DEVICEERROR_SIGMATHRESHOLDCHECK:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_SIGMATHRESHOLDCHECK);
		break;
	case VL53L0X_DEVICEERROR_TCC:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_TCC);
		break;
	case VL53L0X_DEVICEERROR_PHASECONSISTENCY:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_PHASECONSISTENCY);
		break;
	case VL53L0X_DEVICEERROR_MINCLIP:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_MINCLIP);
		break;
	case VL53L0X_DEVICEERROR_RANGECOMPLETE:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_RANGECOMPLETE);
		break;
	case VL53L0X_DEVICEERROR_ALGOUNDERFLOW:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_ALGOUNDERFLOW);
		break;
	case VL53L0X_DEVICEERROR_ALGOOVERFLOW:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_ALGOOVERFLOW);
		break;
	case VL53L0X_DEVICEERROR_RANGEIGNORETHRESHOLD:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_DEVICEERROR_RANGEIGNORETHRESHOLD);
		break;

	default:
		VL53L0X_COPYSTRING(pDeviceErrorString,
			VL53L0X_STRING_UNKNOW_ERROR_CODE);
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
/*
 * Translate a numeric range status (0..5) into its descriptive string;
 * any other value maps to the "none" string.
 */
VL53L0X_Error VL53L0X_get_range_status_string(uint8_t RangeStatus,
		char *pRangeStatusString) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;

	LOG_FUNCTION_START("");

	switch (RangeStatus) {
	case 0:
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_RANGEVALID);
		break;
	case 1:
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_SIGMA);
		break;
	case 2:
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_SIGNAL);
		break;
	case 3:
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_MINRANGE);
		break;
	case 4:
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_PHASE);
		break;
	case 5:
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_HW);
		break;

	default: /**/
		VL53L0X_COPYSTRING(pRangeStatusString,
			VL53L0X_STRING_RANGESTATUS_NONE);
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
/*
 * Translate a PAL (driver-level) error code into its descriptive string.
 * Unrecognized codes yield VL53L0X_STRING_UNKNOW_ERROR_CODE.
 */
VL53L0X_Error VL53L0X_get_pal_error_string(VL53L0X_Error PalErrorCode,
		char *pPalErrorString) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;

	LOG_FUNCTION_START("");

	switch (PalErrorCode) {
	case VL53L0X_ERROR_NONE:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_NONE);
		break;
	case VL53L0X_ERROR_CALIBRATION_WARNING:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_CALIBRATION_WARNING);
		break;
	case VL53L0X_ERROR_MIN_CLIPPED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_MIN_CLIPPED);
		break;
	case VL53L0X_ERROR_UNDEFINED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_UNDEFINED);
		break;
	case VL53L0X_ERROR_INVALID_PARAMS:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_INVALID_PARAMS);
		break;
	case VL53L0X_ERROR_NOT_SUPPORTED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_NOT_SUPPORTED);
		break;
	case VL53L0X_ERROR_INTERRUPT_NOT_CLEARED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_INTERRUPT_NOT_CLEARED);
		break;
	case VL53L0X_ERROR_RANGE_ERROR:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_RANGE_ERROR);
		break;
	case VL53L0X_ERROR_TIME_OUT:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_TIME_OUT);
		break;
	case VL53L0X_ERROR_MODE_NOT_SUPPORTED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_MODE_NOT_SUPPORTED);
		break;
	case VL53L0X_ERROR_BUFFER_TOO_SMALL:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_BUFFER_TOO_SMALL);
		break;
	case VL53L0X_ERROR_GPIO_NOT_EXISTING:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_GPIO_NOT_EXISTING);
		break;
	case VL53L0X_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_GPIO_FUNCTIONALITY_NOT_SUPPORTED);
		break;
	case VL53L0X_ERROR_CONTROL_INTERFACE:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_CONTROL_INTERFACE);
		break;
	case VL53L0X_ERROR_INVALID_COMMAND:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_INVALID_COMMAND);
		break;
	case VL53L0X_ERROR_DIVISION_BY_ZERO:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_DIVISION_BY_ZERO);
		break;
	case VL53L0X_ERROR_REF_SPAD_INIT:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_REF_SPAD_INIT);
		break;
	case VL53L0X_ERROR_NOT_IMPLEMENTED:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_ERROR_NOT_IMPLEMENTED);
		break;

	default:
		VL53L0X_COPYSTRING(pPalErrorString,
			VL53L0X_STRING_UNKNOW_ERROR_CODE);
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
/*
 * Translate a PAL state code into its descriptive string.  Unknown codes
 * map to the "unknown" state string.
 */
VL53L0X_Error VL53L0X_get_pal_state_string(VL53L0X_State PalStateCode,
		char *pPalStateString) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;

	LOG_FUNCTION_START("");

	switch (PalStateCode) {
	case VL53L0X_STATE_POWERDOWN:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_POWERDOWN);
		break;
	case VL53L0X_STATE_WAIT_STATICINIT:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_WAIT_STATICINIT);
		break;
	case VL53L0X_STATE_STANDBY:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_STANDBY);
		break;
	case VL53L0X_STATE_IDLE:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_IDLE);
		break;
	case VL53L0X_STATE_RUNNING:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_RUNNING);
		break;
	case VL53L0X_STATE_UNKNOWN:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_UNKNOWN);
		break;
	case VL53L0X_STATE_ERROR:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_ERROR);
		break;

	default:
		VL53L0X_COPYSTRING(pPalStateString,
			VL53L0X_STRING_STATE_UNKNOWN);
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
/*
 * Translate a sequence step id into its descriptive string.  Unlike the
 * other lookup helpers, an unknown id returns
 * VL53L0X_ERROR_INVALID_PARAMS and leaves the output buffer untouched.
 */
VL53L0X_Error VL53L0X_get_sequence_steps_info(
		VL53L0X_SequenceStepId SequenceStepId,
		char *pSequenceStepsString) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;

	LOG_FUNCTION_START("");

	switch (SequenceStepId) {
	case VL53L0X_SEQUENCESTEP_TCC:
		VL53L0X_COPYSTRING(pSequenceStepsString,
			VL53L0X_STRING_SEQUENCESTEP_TCC);
		break;
	case VL53L0X_SEQUENCESTEP_DSS:
		VL53L0X_COPYSTRING(pSequenceStepsString,
			VL53L0X_STRING_SEQUENCESTEP_DSS);
		break;
	case VL53L0X_SEQUENCESTEP_MSRC:
		VL53L0X_COPYSTRING(pSequenceStepsString,
			VL53L0X_STRING_SEQUENCESTEP_MSRC);
		break;
	case VL53L0X_SEQUENCESTEP_PRE_RANGE:
		VL53L0X_COPYSTRING(pSequenceStepsString,
			VL53L0X_STRING_SEQUENCESTEP_PRE_RANGE);
		break;
	case VL53L0X_SEQUENCESTEP_FINAL_RANGE:
		VL53L0X_COPYSTRING(pSequenceStepsString,
			VL53L0X_STRING_SEQUENCESTEP_FINAL_RANGE);
		break;

	default:
		Status = VL53L0X_ERROR_INVALID_PARAMS;
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
/*
 * Translate a limit check id into its descriptive string.  Unrecognized
 * ids yield VL53L0X_STRING_UNKNOW_ERROR_CODE.
 * NOTE(review): the Dev parameter is unused; it is kept for API symmetry
 * with the other per-device accessors.
 */
VL53L0X_Error VL53L0X_get_limit_check_info(VL53L0X_DEV Dev, uint16_t LimitCheckId,
	char *pLimitCheckString) {
	VL53L0X_Error Status = VL53L0X_ERROR_NONE;

	LOG_FUNCTION_START("");

	switch (LimitCheckId) {
	case VL53L0X_CHECKENABLE_SIGMA_FINAL_RANGE:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_CHECKENABLE_SIGMA_FINAL_RANGE);
		break;
	case VL53L0X_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_CHECKENABLE_SIGNAL_RATE_FINAL_RANGE);
		break;
	case VL53L0X_CHECKENABLE_SIGNAL_REF_CLIP:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_CHECKENABLE_SIGNAL_REF_CLIP);
		break;
	case VL53L0X_CHECKENABLE_RANGE_IGNORE_THRESHOLD:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_CHECKENABLE_RANGE_IGNORE_THRESHOLD);
		break;
	case VL53L0X_CHECKENABLE_SIGNAL_RATE_MSRC:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_CHECKENABLE_SIGNAL_RATE_MSRC);
		break;
	case VL53L0X_CHECKENABLE_SIGNAL_RATE_PRE_RANGE:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_CHECKENABLE_SIGNAL_RATE_PRE_RANGE);
		break;

	default:
		VL53L0X_COPYSTRING(pLimitCheckString,
			VL53L0X_STRING_UNKNOW_ERROR_CODE);
	}

	LOG_FUNCTION_END(Status);
	return Status;
}
| svanacker/cen-electronic | drivers/tof/vl53l0x/vl53l0x_api_strings.c | C | mit | 17,182 |
/**
* The MIT License (MIT)
*
*
* Copyright (C) 2013 Yu Jing (yujing5b5d@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute,sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED,INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include "dirTraversal.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef WIN32 // for linux
#undef __STRICT_ANSI__
#define D_GNU_SOURCE
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dir.h>
#else // for windows
#include <io.h>
#endif //WIN32
#ifndef WIN32 // for linux
// Return non-zero when path names a directory.  Symlinks are NOT followed
// (lstat), so a symlink to a directory reports 0.
// Fixes two defects of the original: the lstat() return value was ignored,
// so a nonexistent path read an uninitialized struct stat (undefined
// behavior); and a plain C99 `inline` definition with no external
// definition is a latent link error, so the function is now an ordinary
// external function.
int isDir(const char* path)
{
    struct stat st;

    if (lstat(path, &st) != 0)
        return 0;   // unstat-able paths are treated as "not a directory"
    return S_ISDIR(st.st_mode);
}
//
int doTraversal(const char *path, int recursive,file_callback xCallback,void * usr)
{
DIR *pdir;
struct dirent *pdirent;
char tmp[1024];
pdir = opendir(path);
if(pdir)
{
while((pdirent = readdir(pdir)) != 0)
{
//ignore "." && ".."
if(!strcmp(pdirent->d_name, ".")||
!strcmp(pdirent->d_name, "..")) continue;
sprintf(tmp, "%s/%s", path, pdirent->d_name);
xCallback(usr,tmp,isDir(tmp));
//if is Dir and recursive is true , into recursive
if(isDir(tmp) && recursive)
{
doTraversal(tmp, recursive,xCallback,usr);
}
}
}else
{
fprintf(stderr,"opendir error:%s\n", path);
}
closedir(pdir);
return 1;
}
//interface
int dirTraversal(const char *path, int recursive,file_callback xCallback,void * usr)
{
int len;
char tmp[256];
len = strlen(path);
strcpy(tmp, path);
if(tmp[len - 1] == '/') tmp[len -1] = '\0';
if(isDir(tmp))
{
doTraversal(tmp, recursive,xCallback,usr);
}
else
{
//printf("%s\n", path);
xCallback(usr,path,isDir(path));
}
return 1;
}
#else //for windows
/**
*
*/
//int dirTraversal(const char *path, int recursive,file_callback xCallback)
/**
 * Windows implementation: walk path with _findfirst/_findnext, invoking
 * xCallback(usr, entry_path, is_dir) for every entry except "." and "..",
 * recursing into subdirectories when requested.  Returns 1 on success,
 * -1 when the directory cannot be opened.
 *
 * Fixes three defects of the original:
 *  - len was computed as strlen(path)+3 and then mypath[len-1] was read,
 *    indexing past the copied string into uninitialized stack bytes;
 *  - _findnext() ran in the for-loop condition BEFORE the body, so the
 *    first entry returned by _findfirst() was never processed;
 *  - the `-1 == handle` check sat inside the loop, after _findnext() had
 *    already been called on the invalid handle.
 */
int dirTraversal(const char *path, int recursive,file_callback xCallback,void * usr)
{
    int len = strlen(path);
    long handle;
    char mypath[1024];
    char searchpath[1024];
    char nxtpath[1024];
    char sp = '/';
    int i;
    int is_subdir;
    struct _finddata_t fileinfo;

    sprintf(mypath,"%s",path);

    /* pick the separator style used by the caller, dropping a trailing one */
    switch(mypath[len-1])
    {
    case '\\':
        sp = '\\';
        len -= 1;
        mypath[len] = '\0';
        break;
    case '/':
        len -= 1;
        mypath[len] = '\0';
        break;
    default :
        for(i=0;i<len;i++)
        {
            if(mypath[i]=='\\'||mypath[i]=='/')
            {
                sp = mypath[i];
                break;
            }
        }
    }

    sprintf(searchpath,"%s%c%s",mypath,sp,"*");

    handle = _findfirst(searchpath,&fileinfo);
    if(-1==handle) return -1;
    do
    {
        if((0 == strcmp(fileinfo.name,"."))
            || (0 == strcmp(fileinfo.name,"..")))
            continue;
        sprintf(nxtpath,"%s%c%s",mypath,sp,fileinfo.name);
        is_subdir = ((fileinfo.attrib & _A_SUBDIR)!=0);
        /* call back */
        xCallback(usr,nxtpath,is_subdir);
        if(is_subdir && recursive)
            dirTraversal(nxtpath,recursive,xCallback,usr);
    } while(!_findnext(handle,&fileinfo));
    _findclose(handle);
    return 1;
}
#endif //end of linux/windows
| yuikns/eiparser | src/dirTraversal.c | C | mit | 4,209 |
#include "../common/gba.h"
#include "../common/fixed.c"
// 2-D vector in fixed-point.  The anonymous union lets components be
// accessed either by name (v.x, v.y) or by index (v.vec[0], v.vec[1]).
typedef struct{
    union{
        struct{
            fixed x;
            fixed y;
        };
        fixed vec[2];
    };
} Vec2;
// Fixed-point dot product of two 2-D vectors.
fixed DotProduct(Vec2 a, Vec2 b){
    fixed xPart = fixMult(a.x, b.x);
    fixed yPart = fixMult(a.y, b.y);
    return xPart + yPart;
}
// Component-wise difference a - b.
Vec2 VecSub(Vec2 a, Vec2 b){
    Vec2 diff;
    diff.x = a.x - b.x;
    diff.y = a.y - b.y;
    return diff;
}
// Component-wise sum a + b.
Vec2 VecAdd(Vec2 a, Vec2 b){
    Vec2 sum;
    sum.x = a.x + b.x;
    sum.y = a.y + b.y;
    return sum;
}
// Scale vector v by the fixed-point scalar s.
Vec2 VecScale(Vec2 v, fixed s){
    Vec2 scaled;
    scaled.x = fixMult(v.x, s);
    scaled.y = fixMult(v.y, s);
    return scaled;
}
// Direction vector for a fixed-point angle: (sin, cos), so that angle 0
// points along +y.
Vec2 AngleToVec(fixed angle){
    Vec2 dir;
    dir.x = mySin(angle);
    dir.y = myCos(angle);
    return dir;
}
// A wall segment in world space: two fixed-point endpoints and a 15-bit
// BGR color used when the renderer draws the wall column.
typedef struct{
    Vec2 start;
    Vec2 end;
    rgb15 col;
} Wall;
#define MAX_WALL_COUNT 20
Wall walls[MAX_WALL_COUNT];
int wallCount = 0;
// Append a wall to the global wall list.  The original wrote past the end
// of walls[] once MAX_WALL_COUNT entries existed; a full table now drops
// the new wall instead of corrupting memory.
void AddWall(Wall wall){
    if(wallCount >= MAX_WALL_COUNT){
        return;  // table full: ignoring the wall beats an out-of-bounds write
    }
    walls[wallCount] = wall;
    wallCount++;
}
#define FRAME_MEM ((volatile uint16*)MEM_VRAM)
// Fixed-point square root.
// Inputs >= 4.0 are divided by 4 first (range reduction); a truncated
// binomial series for sqrt(1+x) around x = in - 1 seeds the estimate; then
// up to 10 Newton-Raphson iterations (guess = (guess + in/guess)/2) refine
// it.  The reduced case is compensated by doubling the result.
// NOTE(review): the series seed assumes `in` lands reasonably close to 1.0
// after reduction — accuracy for very small inputs is unverified here.
static inline fixed mySqrt(fixed in){
    int reduce = (in >= makeFixed(4));
    if(reduce){
        in /= 4;
    }

    // binomial-series seed for sqrt(1 + x), x = in - FIXED_ONE
    in -= FIXED_ONE;
    fixed guess = FIXED_ONE + in/2 - fixMult(in,in)/8 + fixPow(in,3)/16 - 5*fixPow(in,4)/128 + 7*fixPow(in,5)/256;
    in += FIXED_ONE;

    for(int i = 0; i < 10; i++){
        if(guess == 0){
            break;  // avoid dividing by zero in the Newton step
        }
        guess = (guess + fixDiv(in, guess))/2;
    }

    if(reduce){
        guess *= 2;  // sqrt(4x) = 2*sqrt(x)
    }

    return abs(guess);
}
/*
 * Entry point: sets up the display and VBlank interrupt, then renders a
 * simple first-person view of the wall list once per frame.
 */
int main(void) {
	/* Install the interrupt handler and enable the VBlank interrupt so the
	 * main loop can sleep until the next vertical blank. */
	INT_VECTOR = InterruptMain;
	BNS_REG_IME = 0;
	REG_DISPSTAT |= LCDC_VBL;
	BNS_REG_IE |= IRQ_VBLANK;
	BNS_REG_IME = 1;
	/* 0x0403: bitmap display mode with BG2 enabled -- the framebuffer
	 * written through FRAME_MEM below. */
	REG_DISPLAY = 0x0403;
	/* Clear the whole framebuffer to color 0 (black). */
	for(int i = 0; i < SCREEN_WIDTH*SCREEN_HEIGHT; i++){
		FRAME_MEM[i] = 0;
	}
	/* One hard-coded wall to render. */
	Wall firstWall = {{fixedFromFlt(-5.0f), fixedFromFlt(0.0f)}, {fixedFromFlt(5.0f), fixedFromFlt(4.0f)}, 0x3448};
	AddWall(firstWall);
	fixed playerAngle = 0;
	Vec2 playerPos = {fixedFromFlt(0.0f), fixedFromFlt(-4.0f)};
	uint32 keyStates = 0;
	/* NOTE(review): prevStates is written at the end of the loop but never
	 * read -- either dead code or a planned edge-trigger feature. */
	uint32 prevStates = 0;
	while(1){
		/* BIOS VBlank wait (swi 0x05): one loop iteration per frame. */
		asm("swi 0x05");
		/* The key register is active-low; invert so 1 = pressed. */
		keyStates = ~REG_KEY_INPUT & KEY_ANY;
		Vec2 playerForward = AngleToVec(playerAngle);
		/* Right vector = forward rotated 90 degrees. */
		Vec2 playerRight = {playerForward.y, -playerForward.x};
		/* Movement. NOTE(review): UP/LEFT/RIGHT use a scale of 0.f (no
		 * motion at all) while DOWN uses 0.5f -- the zeros look like
		 * leftover debug values; confirm the intended speeds. */
		if(keyStates & KEY_UP){
			playerPos = VecAdd(playerPos, VecScale(playerForward, fixedFromFlt(0.f)));
		}
		if(keyStates & KEY_DOWN){
			playerPos = VecSub(playerPos, VecScale(playerForward, fixedFromFlt(0.5f)));
		}
		if(keyStates & KEY_LEFT){
			playerPos = VecSub(playerPos, VecScale(playerRight, fixedFromFlt(0.f)));
		}
		if(keyStates & KEY_RIGHT){
			playerPos = VecAdd(playerPos, VecScale(playerRight, fixedFromFlt(0.f)));
		}
		/* Shoulder buttons rotate the view. */
		if(keyStates & BUTTON_L){
			playerAngle += fixedFromFlt(2.5f);
		}
		if(keyStates & BUTTON_R){
			playerAngle -= fixedFromFlt(2.5f);
		}
		//uint16
		for(int i = 0; i < wallCount; i++){
			/* Wall endpoints relative to the player. */
			Vec2 playerToWallStart = VecSub(walls[i].start, playerPos);
			Vec2 playerToWallEnd = VecSub(walls[i].end, playerPos);
			/* Forward components = view-space depth of each endpoint. */
			fixed forwardDotToStart = DotProduct(playerToWallStart, playerForward);
			fixed forwardDotToEnd = DotProduct(playerToWallEnd, playerForward);
			/* Skip walls that lie entirely behind the player. */
			if(forwardDotToStart > 0 || forwardDotToEnd > 0){
				/* NOTE(review): the projection subtracts from the world-space
				 * endpoints (walls[i].start/end) rather than the player-relative
				 * vectors (playerToWallStart/End) computed above -- verify this
				 * is intended. */
				Vec2 startProj = VecSub(walls[i].start, VecScale(playerForward, forwardDotToStart));
				Vec2 endProj = VecSub(walls[i].end, VecScale(playerForward, forwardDotToEnd));
				fixed startProjDotRight = DotProduct(startProj, playerRight);
				fixed endProjDotRight = DotProduct(endProj, playerRight);
				/* Map lateral offsets to screen columns (center = width/2). */
				int32 pixelStart = roundFixedToInt(startProjDotRight*SCREEN_WIDTH)+SCREEN_WIDTH/2;
				int32 pixelEnd = roundFixedToInt( endProjDotRight*SCREEN_WIDTH)+SCREEN_WIDTH/2;
				fixed startDepth = mySqrt(forwardDotToStart);
				fixed endDepth = mySqrt(forwardDotToEnd);
				/* Keep pixelStart <= pixelEnd, swapping the depths with them. */
				if(pixelStart > pixelEnd){
					int32 temp = pixelStart;
					pixelStart = pixelEnd;
					pixelEnd = temp;
					fixed depthTmp = startDepth;
					startDepth = endDepth;
					endDepth = depthTmp;
				}
				/* Entirely off-screen? */
				if(pixelEnd < 0 || pixelStart >= SCREEN_WIDTH){
					continue;
				}
				else{
					/* Clip against the screen edges, interpolating depth.
					 * NOTE(review): dividing two makeFixed() values with plain
					 * '/' produces an unscaled integer ratio (almost always 0);
					 * fixDiv() may have been intended in both branches. */
					if(pixelStart < 0){
						fixed ratio = makeFixed(-pixelStart)/makeFixed(pixelEnd-pixelStart);
						pixelStart = 0;
						startDepth = fixMult(FIXED_ONE-ratio, startDepth) + fixMult(ratio, endDepth);
					}
					if(pixelEnd >= SCREEN_WIDTH){
						fixed ratio = makeFixed(pixelEnd - SCREEN_WIDTH)/makeFixed(pixelEnd);
						pixelEnd = SCREEN_WIDTH - 1;
						endDepth = fixMult(FIXED_ONE-ratio, endDepth) + fixMult(ratio, startDepth);
					}
					/* Linear depth interpolation across the wall's columns. */
					fixed depthIncrement = fixDiv(endDepth - startDepth, makeFixed(pixelEnd - pixelStart + 1));
					fixed currDepth = startDepth;
					rgb15 wallCol = walls[i].col;
					for(int32 x = pixelStart; x <= pixelEnd; x++){
						/* Column wall half-height shrinks with depth.
						 * NOTE(review): wallHeight is not clamped, so for very
						 * close walls SCREEN_HEIGHT/2+wallHeight can exceed
						 * SCREEN_HEIGHT and the wall loop writes past the
						 * visible framebuffer -- confirm. */
						int32 wallHeight = roundFixedToInt(fixDiv(makeFixed(SCREEN_HEIGHT), currDepth));
						int32 y = 0;
						/* Ceiling, wall, floor -- drawn top to bottom. */
						for(; y < SCREEN_HEIGHT/2-wallHeight; y++){
							FRAME_MEM[y*SCREEN_WIDTH+x] = 0x4433;
						}
						for(;y < SCREEN_HEIGHT/2+wallHeight; y++){
							FRAME_MEM[y*SCREEN_WIDTH+x] = wallCol;
						}
						for(;y < SCREEN_HEIGHT; y++){
							FRAME_MEM[y*SCREEN_WIDTH+x] = 0x2211;
						}
						currDepth += depthIncrement;
					}
				}
			}
		}
		prevStates = keyStates;
	}
	return 0;
}
/**
* @file sdram.c
* @brief SDRAM configuration
*
* @section License
*
* Copyright (C) 2010-2015 Oryx Embedded SARL. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* @author Oryx Embedded SARL (www.oryx-embedded.com)
* @version 1.6.4
**/
#include "sam3xa.h"
#include "sam3x_ek.h"
#include "sdram.h"
#include "error.h"
#include "debug.h"
/**
* @brief SDRAM initialization
* @param[in] coreClockFrequency Core clock frequency
**/
void sdramInit(uint32_t coreClockFrequency)
{
   uint32_t n;
   //Enable PIO peripheral clocks
   PMC->PMC_PCER0 = (1 << ID_PIOC) | (1 << ID_PIOD);
   //Enable SMC peripheral clock
   //NOTE(review): PMC_PCER0 is presumably a write-1-to-enable register, so
   //this plain assignment does not undo the PIOC/PIOD enables above --
   //confirm against the SAM3X datasheet before changing it to |=
   PMC->PMC_PCER0 = (1 << ID_SMC);
   //Assign SDRAM pins to Peripheral A function
   PIOC->PIO_ABSR &= ~SDRAM_PIOC_MASK;
   //Disable the PIO from controlling the corresponding pins
   PIOC->PIO_PDR = SDRAM_PIOC_MASK;
   //Enable pull-ups
   PIOC->PIO_PUER = SDRAM_PIOC_MASK;
   //Assign SDRAM pins to Peripheral A function
   PIOD->PIO_ABSR &= ~SDRAM_PIOD_MASK;
   //Disable the PIO from controlling the corresponding pins
   PIOD->PIO_PDR = SDRAM_PIOD_MASK;
   //Enable pull-ups
   PIOD->PIO_PUER = SDRAM_PIOD_MASK;
   //Configure SDRAM enable pin as an output (driven high)
   PIOD->PIO_PER = PIO_PD18;
   PIOD->PIO_OER = PIO_PD18;
   PIOD->PIO_SODR = PIO_PD18;
   //SDRAM features must be set in the Configuration Register
   SDRAMC->SDRAMC_CR = SDRAMC_CR_NC_COL9 | //Number of columns (512)
      SDRAMC_CR_NR_ROW13 | //Number of rows (8192)
      SDRAMC_CR_NB_BANK4 | //Number of banks (4)
      SDRAMC_CR_CAS_LATENCY2 | //CAS latency (2 cycles)
      SDRAMC_CR_DBW | //Data bus width (16 bits)
      SDRAMC_CR_TWR(2) | //Write recovery delay (2 cycles)
      SDRAMC_CR_TRC_TRFC(9) | //Row cycle delay (9 cycles)
      SDRAMC_CR_TRP(3) | //Row precharge delay (3 cycles)
      SDRAMC_CR_TRCD(3) | //Row to column delay (3 cycles)
      SDRAMC_CR_TRAS(6) | //Active to precharge delay (6 cycles)
      SDRAMC_CR_TXSR(10); //Exit self refresh to active delay (10 cycles)
   //For mobile SDRAM, temperature-compensated self refresh (TCSR), drive strength (DS)
   //and partial array self refresh (PASR) must be set in the Low Power Register
   SDRAMC->SDRAMC_LPR = 0;
   //The SDRAM memory type must be set in the Memory Device Register
   SDRAMC->SDRAMC_MDR = SDRAMC_MDR_MD_SDRAM;
   //A minimum pause of 200 us is provided to precede any signal toggle
   //(sleep(1) waits far longer than required, which is harmless here)
   sleep(1);
   //A NOP command is issued to the SDRAM device. The application must set Mode to 1
   //in the Mode Register and perform a write access to any SDRAM address
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_NOP;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   //An All Banks Precharge command is issued to the SDRAM devices. The application must
   //set Mode to 2 in the Mode Register and perform a write access to any SDRAM address
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_ALLBANKS_PRECHARGE;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   //Eight auto-refresh (CBR) cycles are provided. The application must set the Mode to 4
   //in the Mode Register and perform a write access to any SDRAM location eight times
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_AUTO_REFRESH;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   //A Mode Register set (MRS) cycle is issued to program the parameters of the SDRAM device,
   //in particular CAS latency and burst length. The application must set Mode to 3 in the
   //Mode Register and perform a write access to the SDRAM. The write address must be chosen
   //so that BA[1:0] are set to 0
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_LOAD_MODEREG;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   //For mobile SDRAM initialization, an Extended Mode Register set (EMRS) cycle is
   //issued to program the SDRAM parameters (TCSR, PASR, DS). The application must
   //set Mode to 5 in the Mode Register and perform a write access to the SDRAM. The
   //write address must be chosen so that BA[1] or BA[0] are set to 1
   //NOTE(review): pointer arithmetic on a uint16_t* scales by sizeof(uint16_t),
   //so "+ 0x01000000" adds 0x2000000 BYTES to SDRAM_BASE -- verify this still
   //selects an address with BA0/BA1 set as required above
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_EXT_LOAD_MODEREG;
   *((uint16_t *)(SDRAM_BASE) + 0x01000000) = 0x00000000;
   //The application must go into Normal Mode, setting Mode to 0 in the Mode Register and
   //performing a write access at any location in the SDRAM
   SDRAMC->SDRAMC_MR = SDRAMC_MR_MODE_NORMAL;
   *((uint16_t *)(SDRAM_BASE)) = 0x00000000;
   //Set refresh rate (15.625us), expressed in core clock cycles
   n = coreClockFrequency / 1000;
   n = (n * 15625) / 1000000;
   //Write the refresh rate into the count field in the SDRAMC Refresh Timer register
   SDRAMC->SDRAMC_TR = SDRAMC_TR_COUNT(n);
}
/**
 * @brief SDRAM test routine
 *
 * Fills the entire SDRAM with a deterministic pseudo-random pattern
 * (a simple linear congruential sequence), then re-reads the memory
 * and verifies every 32-bit word.
 *
 * @return NO_ERROR when the memory reads back correctly, ERROR_FAILURE otherwise
 **/
error_t sdramTest(void)
{
   uint32_t *p;
   uint32_t pattern;
   uint_t i;
   //Pass 1: write the pattern over the whole memory range
   pattern = 0x12345678;
   p = (uint32_t *) SDRAM_BASE;
   for(i = 0; i < (SDRAM_SIZE / 4); i++)
   {
      *p++ = pattern;
      //Advance the linear congruential sequence
      pattern = pattern * 0x7AB5 + 0x5E8AC93D;
   }
   //Pass 2: read everything back and compare against the same sequence
   pattern = 0x12345678;
   p = (uint32_t *) SDRAM_BASE;
   for(i = 0; i < (SDRAM_SIZE / 4); i++)
   {
      if(*p++ != pattern)
         return ERROR_FAILURE;
      pattern = pattern * 0x7AB5 + 0x5E8AC93D;
   }
   //Successful test
   return NO_ERROR;
}
| miragecentury/M2_SE_RTOS_Project | Project/LPC1549_Keil/CycloneTCP_SSL_Crypto_Open_1_6_4/demo/common/atmel/boards/sam3x_ek/sdram.c | C | mit | 7,070 |
/* Copyright (c) 2015 Mathias Panzenböck
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "au.h"
/* On-disk Sun/NeXT .au header. All fields are stored big-endian and are
 * converted with be32toh() before use (see au_isfile). */
struct au_header {
	uint32_t magic;       /* file magic (compared against AU_MAGIC) */
	uint32_t data_offset; /* byte offset of the audio payload; multiple of 8 */
	uint32_t data_size;   /* payload size in bytes; 0xffffffff = unknown */
	uint32_t encoding;    /* sample encoding id; valid ids are 1..27 */
	uint32_t sample_rate; /* samples per second (not validated here) */
	uint32_t channels;    /* channel count; must be non-zero */
};
/* Probe a memory buffer for a Sun/NeXT .au file.
 * Returns 1 when the buffer starts with a plausible AU file (storing its
 * byte length in *lengthptr when given), 0 otherwise. */
int au_isfile(const uint8_t *data, size_t input_len, size_t *lengthptr)
{
	const struct au_header *hdr;
	size_t offset, size, length;
	uint32_t encoding, channels;

	/* Need at least a full header with the right magic number. */
	if (input_len < AU_HEADER_SIZE || MAGIC(data) != AU_MAGIC)
		return 0;

	hdr = (const struct au_header *)data;
	offset   = be32toh(hdr->data_offset);
	size     = be32toh(hdr->data_size);
	encoding = be32toh(hdr->encoding);
	channels = be32toh(hdr->channels);

	/* Sanity-check the header fields. */
	if (offset % 8 != 0)
		return 0;
	if (encoding < 1 || encoding > 27)
		return 0;
	if (channels == 0)
		return 0;
	if (size == 0 || size == 0xffffffff)
		return 0;

	/* offset + size must not wrap around. */
	if (SIZE_MAX - offset < size)
		return 0;

	length = offset + size;

	/* A declared length past the end of the buffer most likely means a
	 * truncated AU file -- clamp instead of rejecting. */
	if (length > input_len)
		length = input_len;

	if (lengthptr)
		*lengthptr = length;

	return 1;
}
| panzi/mediaextract | src/au.c | C | mit | 2,119 |
/*---------------------------------------------------------------------------------
Name : amixer.c
Author : Marvin Raaijmakers
Description : Plugin for keyTouch that can change the volume (using amixer).
Date of last change: 24-Sep-2006
History : 24-Sep-2006 Added two new plugin functions:
"Volume increase 10%" and "Volume decrease 10%"
05-Mar-2006 - clean_exit() will be used to exit the client
process, that manages the volume bar, cleanly
- update_window() now returns a boolean indicating
if the function should be called again
29-Jan-2006 Added the GUI volume bar to the plugin
Copyright (C) 2005-2006 Marvin Raaijmakers
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-----------------------------------------------------------------------------------*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <gtk/gtk.h>
#include <time.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <string.h>
#include <plugin.h>
#include <amixer-plugin.h>
/* Plugin entry points -- bound to keyTouch actions via plugin_struct below. */
void vol_increase (KTPreferences *preferences);
void vol_decrease (KTPreferences *preferences);
void vol_increase_10 (KTPreferences *preferences);
void vol_decrease_10 (KTPreferences *preferences);
void mute (KTPreferences *preferences);
/* Internal helpers; create/update the volume-bar window (run in a forked
 * child process) and talk to amixer. */
static void create_window (VOLUMEBAR_INFO *volumebar_info);
static int get_current_volume (void);
static void update_volume_bar (GtkWidget *volume_bar);
static gboolean update_window (VOLUMEBAR_INFO *volumebar_info);
static void clean_exit (int sig);
static void start_window (void);
static char *get_keytouch_user_dir (void);
static void change_volume (char *command);
/* Whether this plugin muted the volume itself (see mute()). */
static Boolean is_muted = FALSE;
/* Plugin descriptor read by keyTouch: metadata block, the shared object's
 * file name, the number of provided functions and the function table. */
KeytouchPlugin plugin_struct = {
	{"Amixer", "Marvin Raaijmakers", "GPL 2", "2.3",
	 "This plugin allows you to change the volume. It also shows\n"
	 "the current volume when it changes. To use this plugin amixer\n"
	 "needs to be installed."},
	"amixer.so",
	5, /* number of entries in the table below */
	{{"Volume increase", KTPluginFunctionType_Function, {.function = vol_increase}},
	 {"Volume decrease", KTPluginFunctionType_Function, {.function = vol_decrease}},
	 {"Volume increase 10%", KTPluginFunctionType_Function, {.function = vol_increase_10}},
	 {"Volume decrease 10%", KTPluginFunctionType_Function, {.function = vol_decrease_10}},
	 {"Mute", KTPluginFunctionType_Function, {.function = mute}},
	}
};
void
create_window (VOLUMEBAR_INFO *volumebar_info)
/*
	Input:
		-
	Output:
		volumebar_info - The window element points to the created window and the
		                 volume_bar element points to the volume progressbar in the
		                 window
	Returns:
		-
	Description:
		Called in the forked child process (see start_window()). This function
		creates a window with a progressbar with the following properties:
		- It is positioned in the center ot the screen.
		- It has no window decorations and can not be resized by the user.
		- It will allways be above other windows.
		- It is visible on all desktops.
		- It will not be visible in the taskbar an pager.
		- It does not accept focus.
*/
{
	volumebar_info->window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
	gtk_window_set_position (GTK_WINDOW (volumebar_info->window), GTK_WIN_POS_CENTER);
	gtk_window_set_resizable (GTK_WINDOW (volumebar_info->window), FALSE);
	gtk_window_set_decorated (GTK_WINDOW (volumebar_info->window), FALSE);
	/* The window will allways be above others */
	gtk_window_set_keep_above (GTK_WINDOW (volumebar_info->window), TRUE);
	/* Let the window be visible on all desktops: */
	gtk_window_stick (GTK_WINDOW (volumebar_info->window));
	/* This window will not be visible in the taskbar: */
	gtk_window_set_skip_taskbar_hint (GTK_WINDOW (volumebar_info->window), TRUE);
	/* This window will not be visible in the pager: */
	gtk_window_set_skip_pager_hint (GTK_WINDOW (volumebar_info->window), TRUE);
	gtk_window_set_accept_focus (GTK_WINDOW (volumebar_info->window), FALSE);
	/* The bar itself; the initial fraction/text are placeholders that
	 * update_volume_bar() overwrites before the window is shown. */
	volumebar_info->volume_bar = gtk_progress_bar_new();
	gtk_widget_show (volumebar_info->volume_bar);
	gtk_container_add (GTK_CONTAINER (volumebar_info->window), volumebar_info->volume_bar);
	gtk_widget_set_size_request (volumebar_info->volume_bar, 231, 24);
	gtk_progress_bar_set_fraction (GTK_PROGRESS_BAR (volumebar_info->volume_bar), 0.52);
	gtk_progress_bar_set_pulse_step (GTK_PROGRESS_BAR (volumebar_info->volume_bar), 0.02);
	gtk_progress_bar_set_text (GTK_PROGRESS_BAR (volumebar_info->volume_bar), "Volume");
}
int
get_current_volume (void)
/*
	Returns:
		The current master volume percentage as reported by amixer, or -1
		when retrieving the volume failed.
	Description:
		Runs "amixer sget Master" filtered to the "Front Left:" line and
		scans the output for '[' followed by an integer followed by '%'.
*/
{
	FILE *amixer;
	/* BUGFIX: 'c' must be an int, not a char. getc() returns EOF (-1) out
	 * of band; on platforms where plain char is unsigned the comparison
	 * "c != EOF" could never be true (infinite loop at end of input), and
	 * even with signed char a 0xFF data byte would alias EOF. */
	int c;
	int volume = -1;
	amixer = popen ("amixer sget Master | grep \"Front Left:\"", "r");
	if (amixer)
	{
		do {
			c = getc(amixer);
			/* We have found the volume when the following appears:
			 * '[' followed by an integer followed by '%'
			 */
			if (c == '[' && fscanf(amixer, "%d", &volume) && (c = getc(amixer)) == '%')
			{
				break;
			}
			volume = -1;
		} while (c != '\n' && c != EOF);
		pclose (amixer);
	}
	return (volume);
}
void
update_volume_bar (GtkWidget *volume_bar)
/*
	Output:
		volume_bar - Will show the percentage of the current volume
	Description:
		Reads the current volume via get_current_volume() and updates both
		the text and the fill level of the progress bar. A volume of 0, or
		a failed lookup (-1), is displayed as "Muted" with an empty bar.
*/
{
	int volume;
	gchar *text;
	volume = get_current_volume();
	if (volume && volume != -1)
	{
		text = g_strdup_printf("Volume %d%%", volume);
		if (text)
		{
			gtk_progress_bar_set_text (GTK_PROGRESS_BAR(volume_bar), text);
			g_free (text);
		}
	}
	else
	{
		volume = 0;
		gtk_progress_bar_set_text (GTK_PROGRESS_BAR(volume_bar), "Muted");
	}
	/* CONSISTENCY: use gtk_progress_bar_set_fraction() -- the same API
	 * create_window() uses -- instead of the deprecated GTK 1.x
	 * compatibility call gtk_progress_set_percentage(). */
	gtk_progress_bar_set_fraction (GTK_PROGRESS_BAR(volume_bar), (gdouble)volume/100.0);
	/* Directly draw the progressbar: */
	while (g_main_context_iteration(NULL, FALSE))
		; /* NULL Statement */
}
gboolean
update_window (VOLUMEBAR_INFO *volumebar_info)
/*
	Input:
		volumebar_info->close_time - The time to close the window
	Output:
		volumebar_info - Will be updated
	Returns:
		TRUE if this timeout callback should fire again after UPDATE_INTERVAL
		milliseconds, FALSE once the window has been destroyed.
	Description:
		Periodic callback for the volume-bar window. A freshly received
		"volume changed" message on the queue extends the window's lifetime;
		once the current time passes close_time the window is destroyed and
		the GTK main loop is left, otherwise the bar is refreshed with the
		current volume.
*/
{
	MSGBUF msg;
	/* A newer message pushes the close time further into the future. */
	if (msgrcv(volumebar_info->msgqid, &msg, sizeof(msg.time), 1, IPC_NOWAIT) != -1)
	{
		volumebar_info->close_time = msg.time + SHOW_WINDOW_TIME;
	}
	if (time(NULL) > volumebar_info->close_time)
	{
		/* Time is up: tear the window down and leave gtk_main(). */
		gtk_widget_destroy (volumebar_info->window);
		gtk_main_quit();
		return FALSE; /* do not reschedule this timeout */
	}
	update_volume_bar (volumebar_info->volume_bar);
	return TRUE; /* keep the timeout running */
}
void
start_window (void)
/*
	Description:
		Runs in the forked child process (see send_volume_changed_signal()).
		This function creates a window with a volume bar and shows it
		SHOW_WINDOW_TIME seconds when it receives a message on the message queue.
		The key of the message queue is generated by running
		ftok(get_keytouch_user_dir(), MSGQ_AMIXER_PROJ_ID). The messages that are
		sent to this queue should contain the time they are sent. The volume window
		will be showed from the time this function receives the message, until the
		time the message was sent plus SHOW_WINDOW_TIME seconds.
		This function never returns; it loops forever waiting for messages.
*/
{
	MSGBUF msg;
	VOLUMEBAR_INFO volumebar_info;
	key_t msgq_key;
	char *keytouch_user_dir;
	gtk_init (0, NULL);
	keytouch_user_dir = get_keytouch_user_dir();
	/* Get the key for the message queue */
	msgq_key = ftok(keytouch_user_dir, MSGQ_AMIXER_PROJ_ID);
	free (keytouch_user_dir);
	if (msgq_key == -1)
	{
		perror ("keytouch amixer plugin");
		return;
	}
	/* Get the identifier of the existing message queue; the queue itself is
	 * created by the parent in send_volume_changed_signal() */
	volumebar_info.msgqid = msgget(msgq_key, 0);
	if (volumebar_info.msgqid == -1)
	{
		perror ("keytouch amixer plugin");
		return;
	}
	while (1)
	{
		/* Block until a "volume changed" message arrives */
		if (msgrcv(volumebar_info.msgqid, &msg, sizeof(msg.time), 1, 0) != -1)
		{
			volumebar_info.close_time = msg.time + SHOW_WINDOW_TIME;
			/* Ignore stale messages whose display window already passed */
			if (time(NULL) <= volumebar_info.close_time)
			{
				create_window (&volumebar_info);
				update_volume_bar (volumebar_info.volume_bar);
				gtk_widget_show (volumebar_info.window);
				/* update_window() keeps refreshing the bar and eventually
				 * destroys the window and quits this gtk_main() */
				g_timeout_add (UPDATE_INTERVAL, (GSourceFunc) update_window, &volumebar_info);
				gtk_main();
			}
		}
	}
}
char
*get_keytouch_user_dir (void)
/*
	Returns:
		The address of some new allocated space which is a string containing the
		value of the environment variable HOME followed by "/.keytouch2".
		The caller owns the returned string and must free() it.
	Description:
		Exits the program when $HOME is unset or memory allocation fails.
		Uses malloc()+snprintf() instead of the GNU-only asprintf() so this
		function no longer depends on _GNU_SOURCE.
*/
{
	char *keytouch_dir, *home;
	size_t len;
	home = getenv("HOME");
	if (home == NULL)
	{
		fputs ("keytouch amixer plugin: could not get environment variable $HOME", stderr);
		exit (EXIT_FAILURE);
	}
	/* +1 for the terminating NUL */
	len = strlen(home) + strlen("/.keytouch2") + 1;
	keytouch_dir = malloc(len);
	if (keytouch_dir == NULL)
	{
		fputs ("keytouch amixer plugin: failed to allocate memory.", stderr);
		exit (EXIT_FAILURE);
	}
	snprintf (keytouch_dir, len, "%s/.keytouch2", home);
	return (keytouch_dir);
}
void
clean_exit (int sig)
/*
	Signal handler (installed for SIGINT/SIGQUIT/SIGTERM in the child
	process that manages the volume bar): terminate the process cleanly.
	The signal number is intentionally unused.
*/
{
	exit (EXIT_SUCCESS);
}
void
send_volume_changed_signal (void)
/*
	Description:
		This function sends a signal to the child program that manages the
		volumebar. The child will receive the signal and will show the volumebar.
		The child process will be created if it does not exist yet.
		The message queue identifier is cached in a static variable, so the
		queue lookup and the fork only happen on the first call.
*/
{
	static int qid = -1;
	MSGBUF msg;
	/* If this is the first time this function was called */
	if (qid == -1)
	{
		key_t msgq_key;
		char *keytouch_user_dir;
		keytouch_user_dir = get_keytouch_user_dir();
		/* Get the key for the message queue */
		msgq_key = ftok(keytouch_user_dir, MSGQ_AMIXER_PROJ_ID);
		free (keytouch_user_dir);
		if (msgq_key == -1)
		{
			perror ("keytouch amixer plugin");
			return;
		}
		/* Get the message queue identifier and create the queue if necessary */
		qid = msgget(msgq_key, MSGQ_PERMISSIONS | IPC_CREAT);
		if (qid == -1)
		{
			perror ("keytouch amixer plugin");
			return;
		}
		if (fork() == 0)
		{
			/* Trap key signals */
			signal (SIGINT, clean_exit);
			signal (SIGQUIT, clean_exit);
			signal (SIGTERM, clean_exit);
			/* We will now start the run_window() function in our
			 * child process for showing a volume bar to the user
			 */
			start_window();
			exit (EXIT_SUCCESS); /* We will never get here because of
			                      * the infinite loop in run_window()
			                      */
		}
	}
	/* Notify the (possibly just started) child; it shows the bar until
	 * msg.time + SHOW_WINDOW_TIME */
	msg.mtype = 1;
	msg.time = time(NULL);
	if (msgsnd(qid, &msg, sizeof(msg.time), 0) == -1)
	{
		perror ("keytouch amixer plugin");
	}
}
void
change_volume (char *command)
/*
	Input:
		command - The shell command that changes the volume.
	Description:
		Runs 'command' through "sh -c" in a child process; the parent then
		notifies the volume-bar process via send_volume_changed_signal().
*/
{
	pid_t pid;
	pid = fork();
	if (pid == 0)
	{
		/* Child: hand the command line over to the shell. */
		execlp ("sh", "sh", "-c", command, NULL);
		exit (EXIT_SUCCESS); /* only reached when execlp() fails */
	}
	/* Parent (or failed fork): make the volume bar appear/refresh. */
	send_volume_changed_signal();
}
/* Action callbacks registered in plugin_struct. Each adjusts the master
 * volume through amixer and pops up the volume bar via change_volume().
 * The "is_muted &= !get_current_volume()" lines keep the muted flag set
 * only while the volume is still 0, so an externally raised volume cancels
 * our muted state. */
void
vol_increase (KTPreferences *preferences)
{
	is_muted = FALSE; /* raising the volume always unmutes */
	change_volume ( CHANGE_VOL_CMD(VOL_DEFAULT_INCR) );
}
void
vol_decrease (KTPreferences *preferences)
{
	is_muted &= !get_current_volume();
	change_volume ( CHANGE_VOL_CMD(VOL_DEFAULT_DECR) );
}
/* Same as the two above, but in 10% steps. */
void
vol_increase_10 (KTPreferences *preferences)
{
	is_muted = FALSE;
	change_volume ( CHANGE_VOL_CMD(VOL_10PERCENT_INCR) );
}
void
vol_decrease_10 (KTPreferences *preferences)
{
	is_muted &= !get_current_volume();
	change_volume ( CHANGE_VOL_CMD(VOL_10PERCENT_DECR) );
}
void
mute (KTPreferences *preferences)
/*
	Toggle between muted (volume 0%) and the volume that was active before
	muting. The pre-mute volume is remembered in a static variable across
	calls; an externally changed (non-zero) volume cancels the muted state.
*/
{
	static int prev_volume = -1;
	int current_volume;
	char *command = NULL;
	current_volume = get_current_volume();
	/* Keep is_muted only while the volume is actually still 0 */
	is_muted &= !current_volume;
	if (is_muted)
	{
		/* Tell amixer to set the volume to prev_volume */
		if (asprintf(&command, "amixer sset Master %d%% > /dev/null", prev_volume) == -1)
		{
			fputs ("keytouch amixer plugin: asprintf() failed. "
			       "This is probably caused because it failed to allocate memory.", stderr);
			/* BUGFIX: asprintf() leaves 'command' undefined on failure; reset
			 * it so we do not exec and free a garbage pointer below. */
			command = NULL;
		}
	}
	else if (current_volume)
	{
		/* Tell amixer to set the volume to 0 */
		command = strdup("amixer sset Master 0% > /dev/null");
		if (command == NULL)
		{
			perror ("keytouch amixer plugin");
		}
		prev_volume = current_volume;
	}
	/* Do we have to mute/unmute? */
	if (command)
	{
		/* CONSISTENCY: reuse change_volume() instead of duplicating its
		 * fork/exec and volume-bar notification logic here. */
		change_volume (command);
		free (command);
		is_muted = !is_muted;
	}
}
| paulmadore/G-Keymap | Reference Code/keymap/keytouch-2.2.4/plugins/amixer.c | C | mit | 13,098 |
#include "strm.h"
#include <math.h>
/* "+" operator: int+int stays int, any other number pair is added as float. */
static int
num_plus(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  strm_value lhs, rhs;

  strm_get_args(strm, argc, args, "NN", &lhs, &rhs);
  if (strm_int_p(lhs) && strm_int_p(rhs)) {
    *ret = strm_int_value(strm_value_int(lhs) + strm_value_int(rhs));
    return STRM_OK;
  }
  if (strm_number_p(lhs) && strm_number_p(rhs)) {
    *ret = strm_float_value(strm_value_float(lhs) + strm_value_float(rhs));
    return STRM_OK;
  }
  return STRM_NG;
}
/* "-" operator: unary negation with one argument, subtraction with two. */
static int
num_minus(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  if (argc == 1) {
    /* Unary form: negate the single operand. */
    strm_value v = args[0];

    if (strm_int_p(v)) {
      *ret = strm_int_value(-strm_value_int(v));
      return STRM_OK;
    }
    if (strm_float_p(v)) {
      *ret = strm_float_value(-strm_value_float(v));
      return STRM_OK;
    }
    return STRM_NG;
  }
  else {
    /* Binary form: lhs - rhs; int-int stays int, otherwise float. */
    strm_value lhs, rhs;

    strm_get_args(strm, argc, args, "NN", &lhs, &rhs);
    if (strm_int_p(lhs) && strm_int_p(rhs)) {
      *ret = strm_int_value(strm_value_int(lhs) - strm_value_int(rhs));
      return STRM_OK;
    }
    if (strm_number_p(lhs) && strm_number_p(rhs)) {
      *ret = strm_float_value(strm_value_float(lhs) - strm_value_float(rhs));
      return STRM_OK;
    }
  }
  return STRM_NG;
}
/* "*" operator: int*int stays int, anything else is multiplied as float. */
static int
num_mult(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  strm_value lhs, rhs;

  strm_get_args(strm, argc, args, "NN", &lhs, &rhs);
  if (strm_int_p(lhs) && strm_int_p(rhs)) {
    *ret = strm_int_value(strm_value_int(lhs) * strm_value_int(rhs));
  }
  else {
    *ret = strm_float_value(strm_value_float(lhs) * strm_value_float(rhs));
  }
  return STRM_OK;
}
/* "/" operator: always performed in floating point ("ff" coerces both). */
static int
num_div(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  double dividend, divisor;

  strm_get_args(strm, argc, args, "ff", &dividend, &divisor);
  *ret = strm_float_value(dividend / divisor);
  return STRM_OK;
}
/* "|" operator: bitwise OR of two integers. */
static int
num_bar(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  strm_int x, y;

  /* BUGFIX: the "ii" format stores plain strm_int values (see num_mod's use
   * of "i"), but the old code received them into strm_value variables and
   * then ran them through strm_value_int() a second time, decoding
   * already-decoded integers. Receive them as strm_int directly. */
  strm_get_args(strm, argc, args, "ii", &x, &y);
  *ret = strm_int_value(x|y);
  return STRM_OK;
}
/* "%" operator: integer remainder for ints, fmod() for floats. */
static int
num_mod(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  strm_value lhs;
  strm_int rhs;

  strm_get_args(strm, argc, args, "Ni", &lhs, &rhs);
  if (strm_int_p(lhs)) {
    *ret = strm_int_value(strm_value_int(lhs) % rhs);
    return STRM_OK;
  }
  if (strm_float_p(lhs)) {
    /* Floating-point remainder, keeping C's fmod() sign semantics. */
    *ret = strm_float_value(fmod(strm_value_float(lhs), rhs));
    return STRM_OK;
  }
  return STRM_NG;
}
/* Comparison operators. Operands are fetched with "ff", i.e. both are
 * coerced to double before comparing; the result is a boolean value. */
static int
num_gt(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  double lhs, rhs;

  strm_get_args(strm, argc, args, "ff", &lhs, &rhs);
  *ret = strm_bool_value(lhs > rhs);
  return STRM_OK;
}

static int
num_ge(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  double lhs, rhs;

  strm_get_args(strm, argc, args, "ff", &lhs, &rhs);
  *ret = strm_bool_value(lhs >= rhs);
  return STRM_OK;
}

static int
num_lt(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  double lhs, rhs;

  strm_get_args(strm, argc, args, "ff", &lhs, &rhs);
  *ret = strm_bool_value(lhs < rhs);
  return STRM_OK;
}

static int
num_le(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  double lhs, rhs;

  strm_get_args(strm, argc, args, "ff", &lhs, &rhs);
  *ret = strm_bool_value(lhs <= rhs);
  return STRM_OK;
}
/* The "number" constructor/coercion function: checks that the single
 * argument is a number ("N") and stores it unchanged into *ret. */
static int
num_number(strm_stream* strm, int argc, strm_value* args, strm_value* ret)
{
  strm_get_args(strm, argc, args, "N", ret);
  return STRM_OK;
}
/* Namespace holding the numeric operators; created by strm_number_init(). */
strm_state* strm_ns_number;

void
strm_number_init(strm_state* state)
{
  /* Operator table for the "number" namespace, registered in order. */
  static const struct {
    const char* name;
    int (*func)(strm_stream*, int, strm_value*, strm_value*);
  } ops[] = {
    {"+",  num_plus},
    {"-",  num_minus},
    {"*",  num_mult},
    {"/",  num_div},
    {"%",  num_mod},
    {"|",  num_bar},
    {"<",  num_lt},
    {"<=", num_le},
    {">",  num_gt},
    {">=", num_ge},
  };
  size_t i;

  strm_ns_number = strm_ns_new(NULL, "number");
  for (i = 0; i < sizeof(ops)/sizeof(ops[0]); i++) {
    strm_var_def(strm_ns_number, ops[i].name, strm_cfunc_value(ops[i].func));
  }
  strm_var_def(state, "number", strm_cfunc_value(num_number));
}
| matz/streem | src/number.c | C | mit | 4,205 |
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE78_OS_Command_Injection__wchar_t_connect_socket_w32spawnl_18.c
Label Definition File: CWE78_OS_Command_Injection.strings.label.xml
Template File: sources-sink-18.tmpl.c
*/
/*
* @description
* CWE: 78 OS Command Injection
* BadSource: connect_socket Read data using a connect socket (client side)
* GoodSource: Fixed string
* Sink: w32spawnl
* BadSink : execute command with wspawnl
* Flow Variant: 18 Control flow: goto statements
*
* */
#include "std_testcase.h"
#include <wchar.h>
#ifdef _WIN32
#define COMMAND_INT_PATH L"%WINDIR%\\system32\\cmd.exe"
#define COMMAND_INT L"cmd.exe"
#define COMMAND_ARG1 L"/c"
#define COMMAND_ARG2 L"dir"
#define COMMAND_ARG3 data
#else /* NOT _WIN32 */
#include <unistd.h>
#define COMMAND_INT_PATH L"/bin/sh"
#define COMMAND_INT L"sh"
#define COMMAND_ARG1 L"ls"
#define COMMAND_ARG2 L"-la"
#define COMMAND_ARG3 data
#endif
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define CLOSE_SOCKET closesocket
#else /* NOT _WIN32 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define CLOSE_SOCKET close
#define SOCKET int
#endif
#define TCP_PORT 27015
#define IP_ADDRESS "127.0.0.1"
#include <process.h>
#ifndef OMITBAD
/* Juliet CWE-78 benchmark "bad" function: reads attacker-controlled data
 * from a TCP connect socket and passes it unvalidated into _wspawnl().
 * The command injection below is INTENTIONAL -- this file exists to test
 * static-analysis tools -- so do not "fix" the flaw. */
void CWE78_OS_Command_Injection__wchar_t_connect_socket_w32spawnl_18_bad()
{
    wchar_t * data;
    wchar_t dataBuffer[100] = L"";
    data = dataBuffer;
    goto source;
source:
    {
#ifdef _WIN32
        WSADATA wsaData;
        int wsaDataInit = 0;
#endif
        int recvResult;
        struct sockaddr_in service;
        wchar_t *replace;
        SOCKET connectSocket = INVALID_SOCKET;
        size_t dataLen = wcslen(data);
        do
        {
#ifdef _WIN32
            if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR)
            {
                break;
            }
            wsaDataInit = 1;
#endif
            /* POTENTIAL FLAW: Read data using a connect socket */
            connectSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
            if (connectSocket == INVALID_SOCKET)
            {
                break;
            }
            memset(&service, 0, sizeof(service));
            service.sin_family = AF_INET;
            service.sin_addr.s_addr = inet_addr(IP_ADDRESS);
            service.sin_port = htons(TCP_PORT);
            if (connect(connectSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR)
            {
                break;
            }
            /* Abort on error or the connection was closed, make sure to recv one
             * less char than is in the recv_buf in order to append a terminator */
            /* Abort on error or the connection was closed */
            recvResult = recv(connectSocket, (char *)(data + dataLen), sizeof(wchar_t) * (100 - dataLen - 1), 0);
            if (recvResult == SOCKET_ERROR || recvResult == 0)
            {
                break;
            }
            /* Append null terminator */
            data[dataLen + recvResult / sizeof(wchar_t)] = L'\0';
            /* Eliminate CRLF */
            replace = wcschr(data, L'\r');
            if (replace)
            {
                *replace = L'\0';
            }
            replace = wcschr(data, L'\n');
            if (replace)
            {
                *replace = L'\0';
            }
        }
        while (0);
        if (connectSocket != INVALID_SOCKET)
        {
            CLOSE_SOCKET(connectSocket);
        }
#ifdef _WIN32
        if (wsaDataInit)
        {
            WSACleanup();
        }
#endif
    }
    /* wspawnl - specify the path where the command is located */
    /* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
    _wspawnl(_P_WAIT, COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B() - use goodsource and badsink by reversing the blocks on the goto statement.
 * Benchmark "good" variant: data comes from a fixed string instead of the
 * network, so the unvalidated _wspawnl() sink cannot be influenced. */
static void goodG2B()
{
    wchar_t * data;
    wchar_t dataBuffer[100] = L"";
    data = dataBuffer;
    goto source;
source:
    /* FIX: Append a fixed string to data (not user / external input) */
    wcscat(data, L"*.*");
    /* wspawnl - specify the path where the command is located */
    /* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
    _wspawnl(_P_WAIT, COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL);
}
/* Public "good" entry point required by the Juliet harness; dispatches to
 * the good-source/bad-sink variant above. */
void CWE78_OS_Command_Injection__wchar_t_connect_socket_w32spawnl_18_good()
{
    goodG2B();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
/* Standalone driver (only built with INCLUDEMAIN): runs the good variant
 * first, then the bad one, printing progress markers for the harness. */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE78_OS_Command_Injection__wchar_t_connect_socket_w32spawnl_18_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE78_OS_Command_Injection__wchar_t_connect_socket_w32spawnl_18_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
| maurer/tiamat | samples/Juliet/testcases/CWE78_OS_Command_Injection/s05/CWE78_OS_Command_Injection__wchar_t_connect_socket_w32spawnl_18.c | C | mit | 5,742 |
/* $Id: get_attachments.c,v 1.13 2015/07/20 10:35:53 tm Exp $
*
* PDFlib TET sample application.
*
* PDF text extractor which also searches PDF file attachments.
* The file attachments may be attached to the document or
* to page-level annotations of type FileAttachment. The former construct
* also covers PDF 1.7 packages (a.k.a. PDF collections).
*
* Nested attachments (file attachments within file attachments,
* or nested PDF packages) all embedded files are processed recursively.
*/
#include <stdio.h>
#include <string.h>
#include "tetlib.h"
/* global option list */
static const char *globaloptlist =
"searchpath={{../data} "
"{../../../resource/cmap}}";
/* document-specific option list */
static const char *docoptlist = "";
/* page-specific option list */
static const char *pageoptlist = "granularity=page";
/* separator to emit after each chunk of text. This depends on the
* application's needs; for granularity=word a space character may be useful.
*/
#define SEPARATOR "\n"
/* Extract text from a document for which a TET handle is already available */
/*
 * Write the text content of every page of an already-open TET document
 * to outfp, emitting SEPARATOR after each retrieved chunk. Page-level
 * errors are reported on stderr and the affected page is skipped.
 */
static void
extract_text(TET *tet, int doc, FILE *outfp)
{
    const int page_count = (int) TET_pcos_get_number(tet, doc, "length:pages");
    /* volatile: presumably because callers run this inside TET_TRY
     * (setjmp-based) -- TODO confirm */
    volatile int pageno = 0;

    for (pageno = 1; pageno <= page_count; ++pageno)
    {
        const char *chunk;
        int chunk_len;
        int page_handle;

        page_handle = TET_open_page(tet, doc, pageno, pageoptlist);
        if (page_handle == -1)
        {
            fprintf(stderr, "Error %d in %s() on page %d: %s\n",
                TET_get_errnum(tet), TET_get_apiname(tet), pageno,
                TET_get_errmsg(tet));
            continue;               /* skip to the next page */
        }

        /* With granularity=page a single chunk comes back, but looping
         * keeps this correct for every granularity setting. */
        while ((chunk = TET_get_text(tet, page_handle, &chunk_len)) != 0)
        {
            fprintf(outfp, "%s", chunk);
            fprintf(outfp, SEPARATOR);
        }

        if (TET_get_errnum(tet) != 0)
        {
            fprintf(stderr, "Error %d in %s() on page %d: %s\n",
                TET_get_errnum(tet), TET_get_apiname(tet), pageno,
                TET_get_errmsg(tet));
        }

        TET_close_page(tet, page_handle);
    }
}
/* Open a named physical or virtual file, extract the text from it,
search for document or page attachments, and process these recursively.
Either filename must be supplied for physical files, or data+length
from which a virtual file will be created.
The caller cannot create the PVF file since we create a new TET object
here in case an exception happens with the embedded document - the
caller can happily continue with his TET object even in case of an
exception here.
*/
/*
 * Open a document -- either a named file or an in-memory buffer wrapped
 * in a PVF (PDFlib virtual file) -- extract its page text, then recurse
 * into every document-level and page-level file attachment.
 * Returns 0 on success, 4 on out-of-memory, 5 when the document cannot
 * be opened. A fresh TET object is created per call so an exception in
 * an embedded document does not disturb the caller's TET object.
 */
static int
process_document(FILE *outfp, const char *filename, const char *realname,
    const unsigned char *data, int length)
{
    TET *tet;

    if ((tet = TET_new()) == (TET *) 0)
    {
        fprintf(stderr, "extractor: out of memory\n");
        return(4);
    }

    TET_TRY (tet)
    {
        const char *pvfname = "/pvf/attachment";
        int doc;
        int file, filecount;
        int page, pagecount;
        const unsigned char *attdata;
        int attlength;
        int objtype;

        /* Construct a PVF file if data instead of a filename was provided */
        if (!filename)
        {
            TET_create_pvf(tet, pvfname, 0, data, length, "");
            filename = pvfname;
        }

        TET_set_option(tet, globaloptlist);

        doc = TET_open_document(tet, filename, 0, docoptlist);

        if (doc == -1)
        {
            fprintf(stderr,
                "Error %d in %s() (source: attachment '%s'): %s\n",
                TET_get_errnum(tet), TET_get_apiname(tet),
                realname, TET_get_errmsg(tet));
            /* Leave the TRY block before deleting the TET object. */
            TET_EXIT_TRY(tet);
            TET_delete(tet);
            return(5);
        }

        /* -------------------- Extract the document's own page contents */
        extract_text(tet, doc, outfp);

        /* -------------------- Process all document-level file attachments */
        /* Get the number of document-level file attachments. */
        filecount = (int) TET_pcos_get_number(tet, doc,
                "length:names/EmbeddedFiles");

        for (file = 0; file < filecount; file++)
        {
            const char *attname;

            /* fetch the name of the file attachment; check for Unicode file
             * name (a PDF 1.7 feature)
             */
            objtype = (int) TET_pcos_get_number(tet, doc,
                        "type:names/EmbeddedFiles[%d]/UF", file);

            if (objtype == pcos_ot_string)
            {
                attname = TET_pcos_get_string(tet, doc,
                            "names/EmbeddedFiles[%d]/UF", file);
            }
            else {
                /* fetch the name of the file attachment */
                objtype = (int) TET_pcos_get_number(tet, doc,
                            "type:names/EmbeddedFiles[%d]/F", file);

                if (objtype == pcos_ot_string)
                {
                    attname = TET_pcos_get_string(tet, doc,
                                "names/EmbeddedFiles[%d]/F", file);
                }
                else
                {
                    attname = "(unnamed)";
                }
            }

            fprintf(outfp, "\n----- File attachment '%s':\n", attname);

            /* fetch the contents of the file attachment and process it */
            objtype = (int) TET_pcos_get_number(tet, doc,
                        "type:names/EmbeddedFiles[%d]/EF/F", file);

            if (objtype == pcos_ot_stream)
            {
                attdata = TET_pcos_get_stream(tet, doc, &attlength, "",
                            "names/EmbeddedFiles[%d]/EF/F", file);

                /* Recurse: the attachment may itself contain attachments. */
                (void) process_document(outfp, 0, attname, attdata, attlength);
            }
        }

        /* -------------------- Process all page-level file attachments */
        pagecount = (int) TET_pcos_get_number(tet, doc, "length:pages");

        /* Check all pages for annotations of type FileAttachment */
        for (page = 0; page < pagecount; page++)
        {
            int annot, annotcount;

            annotcount = (int) TET_pcos_get_number(tet, doc,
                            "length:pages[%d]/Annots", page);

            for (annot = 0; annot < annotcount; annot++)
            {
                const char *val;
                char attname[128];

                val = TET_pcos_get_string(tet, doc,
                        "pages[%d]/Annots[%d]/Subtype", page, annot);

                /* Synthetic display name for the attachment (1-based). */
                sprintf(attname, "page %d, annotation %d", page+1, annot+1);
                if (!strcmp(val, "FileAttachment"))
                {
                    /* fetch the contents of the attachment and process it */
                    objtype = (int) TET_pcos_get_number(tet, doc,
                        "type:pages[%d]/Annots[%d]/FS/EF/F", page, annot);
                    if (objtype == pcos_ot_stream)
                    {
                        attdata = TET_pcos_get_stream(tet, doc, &attlength, "",
                            "pages[%d]/Annots[%d]/FS/EF/F", page, annot);

                        (void) process_document(outfp, 0,
                                    attname, attdata, attlength);
                    }
                }
            }
        }

        TET_close_document(tet, doc);

        /* If there was no PVF file deleting it won't do any harm */
        TET_delete_pvf(tet, pvfname, 0);
    }

    TET_CATCH (tet)
    {
        fprintf(stderr,
            "Error %d in %s() (source: attachment '%s'): %s\n",
            TET_get_errnum(tet), TET_get_apiname(tet),
            realname, TET_get_errmsg(tet));
    }

    TET_delete(tet);

    return(0);
}
/*
 * Usage: get_attachments <infilename> <outfilename>
 * Extracts all text from the input PDF (including nested attachments)
 * into the output file. Returns 0 on success, non-zero on error.
 */
int main(int argc, char **argv)
{
    FILE *outfp;
    int exitcode;

    if (argc != 3)
    {
        fprintf(stderr, "usage: %s <infilename> <outfilename>\n", argv[0]);
        return(2);
    }

    outfp = fopen(argv[2], "w");
    if (outfp == NULL)
    {
        fprintf(stderr, "Error: couldn't open output file '%s'\n", argv[2]);
        return(3);
    }

    exitcode = process_document(outfp, argv[1], argv[1], 0, 0);

    fclose(outfp);
    return exitcode;
}
| kentaiwami/masamon | masamon/TET/c/get_attachments.c | C | mit | 7,568 |
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2016 Scott Shawcroft for Adafruit Industries
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "shared-bindings/microcontroller/Pin.h"
#include "shared-bindings/digitalio/DigitalInOut.h"
#include "nrf_gpio.h"
#include "py/mphal.h"
#include "nrf/pins.h"
#include "supervisor/shared/rgb_led_status.h"
#ifdef MICROPY_HW_NEOPIXEL
bool neopixel_in_use;
#endif
#ifdef MICROPY_HW_APA102_MOSI
bool apa102_sck_in_use;
bool apa102_mosi_in_use;
#endif
#ifdef SPEAKER_ENABLE_PIN
bool speaker_enable_in_use;
#endif
// Bit mask of claimed pins on each of up to two ports. nrf52832 has one port; nrf52840 has two.
STATIC uint32_t claimed_pins[GPIO_COUNT];
STATIC uint32_t never_reset_pins[GPIO_COUNT];
// Drive the speaker-enable pin low and mark it as unused. Compiles to a
// no-op on boards without SPEAKER_ENABLE_PIN.
STATIC void reset_speaker_enable_pin(void) {
#ifdef SPEAKER_ENABLE_PIN
    speaker_enable_in_use = false;
    // Configure as a high-drive output with the input buffer disconnected,
    // then force the pin low so the speaker amplifier stays disabled.
    nrf_gpio_cfg(SPEAKER_ENABLE_PIN->number,
            NRF_GPIO_PIN_DIR_OUTPUT,
            NRF_GPIO_PIN_INPUT_DISCONNECT,
            NRF_GPIO_PIN_NOPULL,
            NRF_GPIO_PIN_H0H1,
            NRF_GPIO_PIN_NOSENSE);
    nrf_gpio_pin_write(SPEAKER_ENABLE_PIN->number, false);
#endif
}
// Return every pin that is not marked never-reset to its power-on default
// state, and clear the in-use bookkeeping for the on-board status LEDs
// and the speaker-enable pin.
void reset_all_pins(void) {
    // Claimed state collapses back to just the never-reset set.
    for (size_t port = 0; port < GPIO_COUNT; port++) {
        claimed_pins[port] = never_reset_pins[port];
    }

    for (uint32_t pin = 0; pin < NUMBER_OF_PINS; ++pin) {
        bool keep = (never_reset_pins[nrf_pin_port(pin)] >> nrf_relative_pin_number(pin)) & 1;
        if (!keep) {
            nrf_gpio_cfg_default(pin);
        }
    }

#ifdef MICROPY_HW_NEOPIXEL
    neopixel_in_use = false;
#endif
#ifdef MICROPY_HW_APA102_MOSI
    apa102_sck_in_use = false;
    apa102_mosi_in_use = false;
#endif

    // After configuring SWD because it may be shared.
    reset_speaker_enable_pin();
}
// Mark pin as free and return it to a quiescent state.
// Mark pin as free and return it to a quiescent state. If the pin backs
// an on-board status LED (NeoPixel/APA102) or the speaker enable, also
// re-initialize that function once the pin(s) are released.
void reset_pin_number(uint8_t pin_number) {
    if (pin_number == NO_PIN) {
        return;
    }

    // Clear claimed bit.
    claimed_pins[nrf_pin_port(pin_number)] &= ~(1 << nrf_relative_pin_number(pin_number));

#ifdef MICROPY_HW_NEOPIXEL
    if (pin_number == MICROPY_HW_NEOPIXEL->number) {
        neopixel_in_use = false;
        // Hand the pin back to the status-LED driver.
        rgb_led_status_init();
        return;
    }
#endif
#ifdef MICROPY_HW_APA102_MOSI
    if (pin_number == MICROPY_HW_APA102_MOSI->number ||
        pin_number == MICROPY_HW_APA102_SCK->number) {
        // Only clear the flag that belongs to the pin being released.
        apa102_mosi_in_use = apa102_mosi_in_use && pin_number != MICROPY_HW_APA102_MOSI->number;
        apa102_sck_in_use = apa102_sck_in_use && pin_number != MICROPY_HW_APA102_SCK->number;
        if (!apa102_sck_in_use && !apa102_mosi_in_use) {
            // Both APA102 pins are free again: restore the status LED.
            rgb_led_status_init();
        }
        return;
    }
#endif
#ifdef SPEAKER_ENABLE_PIN
    if (pin_number == SPEAKER_ENABLE_PIN->number) {
        reset_speaker_enable_pin();
    }
#endif
}
// Mark a pin so reset_all_pins()/reset_pin_number() leave it untouched.
void never_reset_pin_number(uint8_t pin_number) {
    const uint32_t mask = 1u << nrf_relative_pin_number(pin_number);
    never_reset_pins[nrf_pin_port(pin_number)] |= mask;
}
// Common-HAL entry point: exempt this pin object from bulk resets.
void common_hal_never_reset_pin(const mcu_pin_obj_t* pin) {
    const uint8_t pin_no = pin->number;
    never_reset_pin_number(pin_no);
}
// Common-HAL entry point: release this pin object and quiesce the pin.
void common_hal_reset_pin(const mcu_pin_obj_t* pin) {
    const uint8_t pin_no = pin->number;
    reset_pin_number(pin_no);
}
// Record that a pin is now in use. Also tracks usage of the special
// status-LED (NeoPixel/APA102) and speaker-enable pins so those
// features know when their pins have been taken over.
void claim_pin(const mcu_pin_obj_t* pin) {
    // Set bit in claimed_pins bitmask.
    claimed_pins[nrf_pin_port(pin->number)] |= 1 << nrf_relative_pin_number(pin->number);

#ifdef MICROPY_HW_NEOPIXEL
    if (pin == MICROPY_HW_NEOPIXEL) {
        neopixel_in_use = true;
    }
#endif
#ifdef MICROPY_HW_APA102_MOSI
    if (pin == MICROPY_HW_APA102_MOSI) {
        apa102_mosi_in_use = true;
    }
    if (pin == MICROPY_HW_APA102_SCK) {
        apa102_sck_in_use = true;
    }
#endif
#ifdef SPEAKER_ENABLE_PIN
    if (pin == SPEAKER_ENABLE_PIN) {
        speaker_enable_in_use = true;
    }
#endif
}
// True when the given raw pin number is not currently claimed.
bool pin_number_is_free(uint8_t pin_number) {
    const uint32_t mask = 1u << nrf_relative_pin_number(pin_number);
    return (claimed_pins[nrf_pin_port(pin_number)] & mask) == 0;
}
// True when the pin object is available for use. Special-function pins
// (status LEDs, speaker enable) report their dedicated in-use flags, and
// on nRF52840 the NFC pins are unavailable while UICR reserves them for NFC.
bool common_hal_mcu_pin_is_free(const mcu_pin_obj_t *pin) {
#ifdef MICROPY_HW_NEOPIXEL
    if (pin == MICROPY_HW_NEOPIXEL) {
        return !neopixel_in_use;
    }
#endif
#ifdef MICROPY_HW_APA102_MOSI
    if (pin == MICROPY_HW_APA102_MOSI) {
        return !apa102_mosi_in_use;
    }
    if (pin == MICROPY_HW_APA102_SCK) {
        return !apa102_sck_in_use;
    }
#endif
#ifdef SPEAKER_ENABLE_PIN
    if (pin == SPEAKER_ENABLE_PIN) {
        return !speaker_enable_in_use;
    }
#endif

#ifdef NRF52840
    // If NFC pins are enabled for NFC, don't allow them to be used for GPIO.
    if (((NRF_UICR->NFCPINS & UICR_NFCPINS_PROTECT_Msk) ==
         (UICR_NFCPINS_PROTECT_NFC << UICR_NFCPINS_PROTECT_Pos)) &&
        (pin->number == 9 || pin->number == 10)) {
        return false;
    }
#endif

    return pin_number_is_free(pin->number);
}
// Expose the raw MCU pin number behind a pin object.
uint8_t common_hal_mcu_pin_number(const mcu_pin_obj_t* pin) {
    const uint8_t pin_no = pin->number;
    return pin_no;
}
// Common-HAL entry point: mark this pin object as claimed.
void common_hal_mcu_pin_claim(const mcu_pin_obj_t* pin) {
    claim_pin(pin);
}
// Common-HAL entry point: release a pin identified by raw number.
void common_hal_mcu_pin_reset_number(uint8_t pin_no) {
    reset_pin_number(pin_no);
}
| adafruit/micropython | ports/nrf/common-hal/microcontroller/Pin.c | C | mit | 6,219 |
#include "../include/csl.h"
/* Word: process a multiple-escape sequence via the primary lexer
 * (Lexer0) of the current context. */
void
MultipleEscape ( )
{
    _MultipleEscape ( _Context_->Lexer0 ) ;
}
/* Word: ( str -- len ) replace the string pointer on top of the data
 * stack with its length. */
void
CSL_Strlen ( )
{
    char * str = (char*) DataStack_Pop ( ) ;
    DataStack_Push ( (int64) Strlen ( str ) ) ;
}
void
CSL_Strcmp ( )
{
DataStack_Push ( (int64) Strcmp ( (byte*) DataStack_Pop ( ), (byte*) DataStack_Pop ( ) ) ) ;
}
void
CSL_Stricmp ( )
{
DataStack_Push ( (int64) Stricmp ( (byte*) DataStack_Pop ( ), (byte*) DataStack_Pop ( ) ) ) ;
}
//char * strcat ( char * destination, const char * source );
/* Word: ( dst src -- dst+src ) concatenate two strings into the shared
 * StrCat scratch buffer and push a new TEMPORARY string of the result.
 * NOTE(review): no bounds check against the scratch buffer's capacity --
 * presumably inputs are known to fit; confirm Buffer size vs. inputs. */
void
CSL_StrCat ( )
{
    //Buffer * b = Buffer_New ( BUFFER_SIZE ) ;
    byte * buffer = Buffer_Data ( _CSL_->StrCatBuffer ); byte *str ;
    char * src = (char*) DataStack_Pop ( ) ;
    char * dst = (char*) DataStack_Pop ( ) ;
    strcpy ( (char*) buffer, dst ) ;
    /* src may be nil; then the result is just a copy of dst */
    if (src) strcat ( (char *) buffer, src ) ;
    str = String_New ( buffer, TEMPORARY ) ; //String_New ( (byte*) buffer, DICTIONARY ) ;
    DataStack_Push ( (int64) str ) ;
    //Buffer_SetAsUnused ( b ) ; ;
}
/* Word: ( dst src -- dst ) copy src over dst in place and push dst.
 *
 * Fix: both DataStack_Pop() calls were nested in strcpy()'s argument
 * list, so the dst/src pairing depended on the compiler's unspecified
 * argument evaluation order. Pop explicitly using the convention from
 * CSL_StrCat (source is on top of the stack).
 * NOTE(review): as the original comment ("cant really work") hints, this
 * still writes into whatever buffer dst points at with no length check. */
void
CSL_StrCpy ( )
{
    char * src = (char*) DataStack_Pop ( ) ; /* top of stack */
    char * dst = (char*) DataStack_Pop ( ) ;
    DataStack_Push ( (int64) strcpy ( dst, src ) ) ;
}
void
String_GetStringToEndOfLine ( )
{
DataStack_Push ( (int64) _String_Get_ReadlineString_ToEndOfLine ( ) ) ;
}
| dennisj001/openvmtil64 | src/primitives/strings.c | C | mit | 1,290 |
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2014 Damien P. George
* Copyright (c) 2016 Paul Sokolovsky
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/mpconfig.h"
#if MICROPY_VFS_FAT
#if !MICROPY_VFS
#error "with MICROPY_VFS_FAT enabled, must also enable MICROPY_VFS"
#endif
#include <string.h>
#include "py/nlr.h"
#include "py/runtime.h"
#include "py/mperrno.h"
#include "lib/oofatfs/ff.h"
#include "extmod/vfs_fat.h"
#include "lib/timeutils/timeutils.h"
#if _MAX_SS == _MIN_SS
#define SECSIZE(fs) (_MIN_SS)
#else
#define SECSIZE(fs) ((fs)->ssize)
#endif
#define mp_obj_fat_vfs_t fs_user_mount_t
// Constructor: VfsFat(block_device). Wraps a Python block-device object
// in a filesystem user-mount structure; the filesystem is not mounted yet.
STATIC mp_obj_t fat_vfs_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    mp_arg_check_num(n_args, n_kw, 1, 1, false);

    // create new object
    fs_user_mount_t *vfs = m_new_obj(fs_user_mount_t);
    vfs->base.type = type;
    vfs->flags = FSUSER_FREE_OBJ;
    vfs->fatfs.drv = vfs;

    // load block protocol methods
    mp_load_method(args[0], MP_QSTR_readblocks, vfs->readblocks);
    mp_load_method_maybe(args[0], MP_QSTR_writeblocks, vfs->writeblocks);
    mp_load_method_maybe(args[0], MP_QSTR_ioctl, vfs->u.ioctl);
    if (vfs->u.ioctl[0] != MP_OBJ_NULL) {
        // device supports new block protocol, so indicate it
        vfs->flags |= FSUSER_HAVE_IOCTL;
    } else {
        // no ioctl method, so assume the device uses the old block protocol
        mp_load_method_maybe(args[0], MP_QSTR_sync, vfs->u.old.sync);
        mp_load_method(args[0], MP_QSTR_count, vfs->u.old.count);
    }
    return MP_OBJ_FROM_PTR(vfs);
}
// Static method VfsFat.mkfs(bdev): format the block device with a FAT
// filesystem (FAT variant, super-floppy/no-partition layout).
// Raises OSError on failure.
STATIC mp_obj_t fat_vfs_mkfs(mp_obj_t bdev_in) {
    // create new object
    fs_user_mount_t *vfs = MP_OBJ_TO_PTR(fat_vfs_make_new(&mp_fat_vfs_type, 1, 0, &bdev_in));

    // make the filesystem
    uint8_t working_buf[_MAX_SS];
    FRESULT res = f_mkfs(&vfs->fatfs, FM_FAT | FM_SFD, 0, working_buf, sizeof(working_buf));
    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }

    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(fat_vfs_mkfs_fun_obj, fat_vfs_mkfs);
STATIC MP_DEFINE_CONST_STATICMETHOD_OBJ(fat_vfs_mkfs_obj, MP_ROM_PTR(&fat_vfs_mkfs_fun_obj));
STATIC MP_DEFINE_CONST_FUN_OBJ_3(fat_vfs_open_obj, fatfs_builtin_open_self);
// listdir([path]): list directory entries. When the path argument is a
// bytes object, entries are returned as bytes; otherwise as str.
STATIC mp_obj_t fat_vfs_listdir_func(size_t n_args, const mp_obj_t *args) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(args[0]);
    const char *path = "";
    bool as_str = true;
    if (n_args == 2) {
        as_str = (mp_obj_get_type(args[1]) != &mp_type_bytes);
        path = mp_obj_str_get_str(args[1]);
    }
    return fat_vfs_listdir2(self, path, as_str);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(fat_vfs_listdir_obj, 1, 2, fat_vfs_listdir_func);
// Shared helper for remove()/rmdir(): delete `path` only when its FAT
// directory-attribute bit matches `attr` (0 for plain files, AM_DIR for
// directories); otherwise raise EISDIR or ENOTDIR respectively.
STATIC mp_obj_t fat_vfs_remove_internal(mp_obj_t vfs_in, mp_obj_t path_in, mp_int_t attr) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    const char *path = mp_obj_str_get_str(path_in);

    FILINFO fno;
    FRESULT res = f_stat(&self->fatfs, path, &fno);

    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }

    // check if path is a file or directory
    if ((fno.fattrib & AM_DIR) == attr) {
        res = f_unlink(&self->fatfs, path);

        if (res != FR_OK) {
            mp_raise_OSError(fresult_to_errno_table[res]);
        }
        return mp_const_none;
    } else {
        mp_raise_OSError(attr ? MP_ENOTDIR : MP_EISDIR);
    }
}
// remove(path): delete a file; raises EISDIR when path is a directory.
STATIC mp_obj_t fat_vfs_remove(mp_obj_t vfs_in, mp_obj_t path_in) {
    // attribute 0 selects plain files
    return fat_vfs_remove_internal(vfs_in, path_in, 0);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(fat_vfs_remove_obj, fat_vfs_remove);
// rmdir(path): delete a directory; raises ENOTDIR when path is a file.
STATIC mp_obj_t fat_vfs_rmdir(mp_obj_t vfs_in, mp_obj_t path_in) {
    // AM_DIR selects directories
    return fat_vfs_remove_internal(vfs_in, path_in, AM_DIR);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(fat_vfs_rmdir_obj, fat_vfs_rmdir);
// rename(old, new): rename a file or directory, overwriting an existing
// destination file (POSIX-like semantics); an existing destination
// directory is not removed.
STATIC mp_obj_t fat_vfs_rename(mp_obj_t vfs_in, mp_obj_t path_in, mp_obj_t path_out) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    const char *old_path = mp_obj_str_get_str(path_in);
    const char *new_path = mp_obj_str_get_str(path_out);
    FRESULT res = f_rename(&self->fatfs, old_path, new_path);
    if (res == FR_EXIST) {
        // if new_path exists then try removing it (but only if it's a file)
        fat_vfs_remove_internal(vfs_in, path_out, 0); // 0 == file attribute
        // try to rename again
        res = f_rename(&self->fatfs, old_path, new_path);
    }
    if (res == FR_OK) {
        return mp_const_none;
    } else {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }
}
STATIC MP_DEFINE_CONST_FUN_OBJ_3(fat_vfs_rename_obj, fat_vfs_rename);
// mkdir(path): create a directory, raising OSError on failure.
STATIC mp_obj_t fat_vfs_mkdir(mp_obj_t vfs_in, mp_obj_t path_o) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    FRESULT res = f_mkdir(&self->fatfs, mp_obj_str_get_str(path_o));
    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(fat_vfs_mkdir_obj, fat_vfs_mkdir);
/// Change current directory.
// chdir(path): change the current working directory on this filesystem,
// raising OSError on failure.
STATIC mp_obj_t fat_vfs_chdir(mp_obj_t vfs_in, mp_obj_t path_in) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    FRESULT res = f_chdir(&self->fatfs, mp_obj_str_get_str(path_in));
    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(fat_vfs_chdir_obj, fat_vfs_chdir);
/// Get the current directory.
// getcwd(): return the current working directory as a str.
STATIC mp_obj_t fat_vfs_getcwd(mp_obj_t vfs_in) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    char cwd[MICROPY_ALLOC_PATH_MAX + 1];
    FRESULT res = f_getcwd(&self->fatfs, cwd, sizeof(cwd));
    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }
    return mp_obj_new_str(cwd, strlen(cwd), false);
}
/// \function stat(path)
/// Get the status of a file or directory.
/// \function stat(path)
/// Get the status of a file or directory. Returns the usual 10-tuple;
/// inode/device/link/uid/gid fields are always 0, and the three time
/// fields are all the FAT modification timestamp.
STATIC mp_obj_t fat_vfs_stat(mp_obj_t vfs_in, mp_obj_t path_in) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    const char *path = mp_obj_str_get_str(path_in);

    FILINFO fno;
    if (path[0] == 0 || (path[0] == '/' && path[1] == 0)) {
        // stat root directory: FatFs has no entry for it, so synthesize one
        fno.fsize = 0;
        fno.fdate = 0x2821; // Jan 1, 2000
        fno.ftime = 0;
        fno.fattrib = AM_DIR;
    } else {
        FRESULT res = f_stat(&self->fatfs, path, &fno);
        if (res != FR_OK) {
            mp_raise_OSError(fresult_to_errno_table[res]);
        }
    }

    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(10, NULL));
    mp_int_t mode = 0;
    if (fno.fattrib & AM_DIR) {
        mode |= 0x4000; // stat.S_IFDIR
    } else {
        mode |= 0x8000; // stat.S_IFREG
    }
    // Decode the packed FAT date/time fields:
    // fdate = yyyyyyymmmmddddd (years since 1980), ftime = hhhhhmmmmmmsssss
    // with 2-second resolution for the seconds field.
    mp_int_t seconds = timeutils_seconds_since_2000(
        1980 + ((fno.fdate >> 9) & 0x7f),
        (fno.fdate >> 5) & 0x0f,
        fno.fdate & 0x1f,
        (fno.ftime >> 11) & 0x1f,
        (fno.ftime >> 5) & 0x3f,
        2 * (fno.ftime & 0x1f)
    );
    t->items[0] = MP_OBJ_NEW_SMALL_INT(mode); // st_mode
    t->items[1] = MP_OBJ_NEW_SMALL_INT(0); // st_ino
    t->items[2] = MP_OBJ_NEW_SMALL_INT(0); // st_dev
    t->items[3] = MP_OBJ_NEW_SMALL_INT(0); // st_nlink
    t->items[4] = MP_OBJ_NEW_SMALL_INT(0); // st_uid
    t->items[5] = MP_OBJ_NEW_SMALL_INT(0); // st_gid
    t->items[6] = MP_OBJ_NEW_SMALL_INT(fno.fsize); // st_size
    t->items[7] = MP_OBJ_NEW_SMALL_INT(seconds); // st_atime
    t->items[8] = MP_OBJ_NEW_SMALL_INT(seconds); // st_mtime
    t->items[9] = MP_OBJ_NEW_SMALL_INT(seconds); // st_ctime

    return MP_OBJ_FROM_PTR(t);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(fat_vfs_stat_obj, fat_vfs_stat);
// Get the status of a VFS.
// statvfs(path): get filesystem statistics as a 10-tuple. The path
// argument is ignored: the whole mounted filesystem is described.
// Block counts are in clusters (cluster size reported as f_bsize).
STATIC mp_obj_t fat_vfs_statvfs(mp_obj_t vfs_in, mp_obj_t path_in) {
    mp_obj_fat_vfs_t *self = MP_OBJ_TO_PTR(vfs_in);
    (void)path_in;

    DWORD nclst;
    FATFS *fatfs = &self->fatfs;
    FRESULT res = f_getfree(fatfs, &nclst);
    if (FR_OK != res) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }

    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(10, NULL));

    t->items[0] = MP_OBJ_NEW_SMALL_INT(fatfs->csize * SECSIZE(fatfs)); // f_bsize
    t->items[1] = t->items[0]; // f_frsize
    t->items[2] = MP_OBJ_NEW_SMALL_INT((fatfs->n_fatent - 2)); // f_blocks
    t->items[3] = MP_OBJ_NEW_SMALL_INT(nclst); // f_bfree
    t->items[4] = t->items[3]; // f_bavail
    t->items[5] = MP_OBJ_NEW_SMALL_INT(0); // f_files
    t->items[6] = MP_OBJ_NEW_SMALL_INT(0); // f_ffree
    t->items[7] = MP_OBJ_NEW_SMALL_INT(0); // f_favail
    t->items[8] = MP_OBJ_NEW_SMALL_INT(0); // f_flags
    t->items[9] = MP_OBJ_NEW_SMALL_INT(_MAX_LFN); // f_namemax

    return MP_OBJ_FROM_PTR(t);
}
// mount(readonly, mkfs): mount the block device. With mkfs truthy, a
// fresh FAT filesystem is created when none is found on the device.
STATIC mp_obj_t vfs_fat_mount(mp_obj_t self_in, mp_obj_t readonly, mp_obj_t mkfs) {
    fs_user_mount_t *self = MP_OBJ_TO_PTR(self_in);

    // Read-only device indicated by writeblocks[0] == MP_OBJ_NULL.
    // User can specify read-only device by:
    //  1. readonly=True keyword argument
    //  2. nonexistent writeblocks method (then writeblocks[0] == MP_OBJ_NULL already)
    if (mp_obj_is_true(readonly)) {
        self->writeblocks[0] = MP_OBJ_NULL;
    }

    // mount the block device
    FRESULT res = f_mount(&self->fatfs);

    // check if we need to make the filesystem
    if (res == FR_NO_FILESYSTEM && mp_obj_is_true(mkfs)) {
        uint8_t working_buf[_MAX_SS];
        res = f_mkfs(&self->fatfs, FM_FAT | FM_SFD, 0, working_buf, sizeof(working_buf));
    }
    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }

    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_3(vfs_fat_mount_obj, vfs_fat_mount);
// umount(): unmount the filesystem, raising OSError on failure.
STATIC mp_obj_t vfs_fat_umount(mp_obj_t self_in) {
    fs_user_mount_t *self = MP_OBJ_TO_PTR(self_in);
    const FRESULT res = f_umount(&self->fatfs);
    if (res != FR_OK) {
        mp_raise_OSError(fresult_to_errno_table[res]);
    }
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(fat_vfs_umount_obj, vfs_fat_umount);
// Map of VfsFat method names to their function objects.
STATIC const mp_rom_map_elem_t fat_vfs_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_mkfs), MP_ROM_PTR(&fat_vfs_mkfs_obj) },
    { MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&fat_vfs_open_obj) },
    { MP_ROM_QSTR(MP_QSTR_listdir), MP_ROM_PTR(&fat_vfs_listdir_obj) },
    { MP_ROM_QSTR(MP_QSTR_mkdir), MP_ROM_PTR(&fat_vfs_mkdir_obj) },
    { MP_ROM_QSTR(MP_QSTR_rmdir), MP_ROM_PTR(&fat_vfs_rmdir_obj) },
    { MP_ROM_QSTR(MP_QSTR_chdir), MP_ROM_PTR(&fat_vfs_chdir_obj) },
    { MP_ROM_QSTR(MP_QSTR_getcwd), MP_ROM_PTR(&fat_vfs_getcwd_obj) },
    { MP_ROM_QSTR(MP_QSTR_remove), MP_ROM_PTR(&fat_vfs_remove_obj) },
    { MP_ROM_QSTR(MP_QSTR_rename), MP_ROM_PTR(&fat_vfs_rename_obj) },
    { MP_ROM_QSTR(MP_QSTR_stat), MP_ROM_PTR(&fat_vfs_stat_obj) },
    { MP_ROM_QSTR(MP_QSTR_statvfs), MP_ROM_PTR(&fat_vfs_statvfs_obj) },
    { MP_ROM_QSTR(MP_QSTR_mount), MP_ROM_PTR(&vfs_fat_mount_obj) },
    { MP_ROM_QSTR(MP_QSTR_umount), MP_ROM_PTR(&fat_vfs_umount_obj) },
};
STATIC MP_DEFINE_CONST_DICT(fat_vfs_locals_dict, fat_vfs_locals_dict_table);

// The VfsFat type object exposed to Python code.
const mp_obj_type_t mp_fat_vfs_type = {
    { &mp_type_type },
    .name = MP_QSTR_VfsFat,
    .make_new = fat_vfs_make_new,
    .locals_dict = (mp_obj_dict_t*)&fat_vfs_locals_dict,
};
#endif // MICROPY_VFS_FAT
| Peetz0r/micropython-esp32 | extmod/vfs_fat.c | C | mit | 12,566 |
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34.c
Label Definition File: CWE78_OS_Command_Injection.strings.label.xml
Template File: sources-sink-34.tmpl.c
*/
/*
* @description
* CWE: 78 OS Command Injection
* BadSource: connect_socket Read data using a connect socket (client side)
* GoodSource: Fixed string
* Sinks: w32_execv
* BadSink : execute command with execv
* Flow Variant: 34 Data flow: use of a union containing two methods of accessing the same data (within the same function)
*
* */
#include "std_testcase.h"
#include <wchar.h>
#ifdef _WIN32
#define COMMAND_INT_PATH "%WINDIR%\\system32\\cmd.exe"
#define COMMAND_INT "cmd.exe"
#define COMMAND_ARG1 "/c"
#define COMMAND_ARG2 "dir"
#define COMMAND_ARG3 data
#else /* NOT _WIN32 */
#include <unistd.h>
#define COMMAND_INT_PATH "/bin/sh"
#define COMMAND_INT "sh"
#define COMMAND_ARG1 "ls"
#define COMMAND_ARG2 "-la"
#define COMMAND_ARG3 data
#endif
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define CLOSE_SOCKET closesocket
#else /* NOT _WIN32 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define CLOSE_SOCKET close
#define SOCKET int
#endif
#define TCP_PORT 27015
#define IP_ADDRESS "127.0.0.1"
#include <process.h>
#define EXECV _execv
typedef union
{
char * unionFirst;
char * unionSecond;
} CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_unionType;
#ifndef OMITBAD
void CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_bad()
{
char * data;
CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_unionType myUnion;
char dataBuffer[100] = "";
data = dataBuffer;
{
#ifdef _WIN32
WSADATA wsaData;
int wsaDataInit = 0;
#endif
int recvResult;
struct sockaddr_in service;
char *replace;
SOCKET connectSocket = INVALID_SOCKET;
size_t dataLen = strlen(data);
do
{
#ifdef _WIN32
if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR)
{
break;
}
wsaDataInit = 1;
#endif
/* POTENTIAL FLAW: Read data using a connect socket */
connectSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (connectSocket == INVALID_SOCKET)
{
break;
}
memset(&service, 0, sizeof(service));
service.sin_family = AF_INET;
service.sin_addr.s_addr = inet_addr(IP_ADDRESS);
service.sin_port = htons(TCP_PORT);
if (connect(connectSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR)
{
break;
}
/* Abort on error or the connection was closed, make sure to recv one
* less char than is in the recv_buf in order to append a terminator */
/* Abort on error or the connection was closed */
recvResult = recv(connectSocket, (char *)(data + dataLen), sizeof(char) * (100 - dataLen - 1), 0);
if (recvResult == SOCKET_ERROR || recvResult == 0)
{
break;
}
/* Append null terminator */
data[dataLen + recvResult / sizeof(char)] = '\0';
/* Eliminate CRLF */
replace = strchr(data, '\r');
if (replace)
{
*replace = '\0';
}
replace = strchr(data, '\n');
if (replace)
{
*replace = '\0';
}
}
while (0);
if (connectSocket != INVALID_SOCKET)
{
CLOSE_SOCKET(connectSocket);
}
#ifdef _WIN32
if (wsaDataInit)
{
WSACleanup();
}
#endif
}
myUnion.unionFirst = data;
{
char * data = myUnion.unionSecond;
{
char *args[] = {COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL};
/* execv - specify the path where the command is located */
/* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
EXECV(COMMAND_INT_PATH, args);
}
}
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B() uses the GoodSource with the BadSink */
static void goodG2B()
{
char * data;
CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_unionType myUnion;
char dataBuffer[100] = "";
data = dataBuffer;
/* FIX: Append a fixed string to data (not user / external input) */
strcat(data, "*.*");
myUnion.unionFirst = data;
{
char * data = myUnion.unionSecond;
{
char *args[] = {COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG2, COMMAND_ARG3, NULL};
/* execv - specify the path where the command is located */
/* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
EXECV(COMMAND_INT_PATH, args);
}
}
}
void CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_good()
{
goodG2B();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
/* seed randomness */
srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
printLine("Calling good()...");
CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_good();
printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
printLine("Calling bad()...");
CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34_bad();
printLine("Finished bad()");
#endif /* OMITBAD */
return 0;
}
#endif
| maurer/tiamat | samples/Juliet/testcases/CWE78_OS_Command_Injection/s01/CWE78_OS_Command_Injection__char_connect_socket_w32_execv_34.c | C | mit | 6,320 |
#include "genfft.h"
/**
* NAME: cc1fft
*
* DESCRIPTION: complex to complex FFT
*
* USAGE:
* void cc1fft(complex *data, int n, int sign)
*
* INPUT: - *data: complex 1D input vector
* - n: number of samples in input vector data
* - sign: sign of the Fourier kernel
*
* OUTPUT: - *data: complex 1D output vector unscaled
*
* NOTES: Optimized system dependent FFT's implemented for:
* - inplace FFT from Mayer and SU (see file fft_mayer.c)
*
* AUTHOR:
* Jan Thorbecke (janth@xs4all.nl)
* The Netherlands
*
*
*----------------------------------------------------------------------
* REVISION HISTORY:
* VERSION AUTHOR DATE COMMENT
* 1.0 Jan Thorbecke Feb '94 Initial version (TU Delft)
* 1.1 Jan Thorbecke June '94 faster in-place FFT
* 2.0 Jan Thorbecke July '97 added Cray SGI calls
* 2.1 Alexander Koek June '98 updated SCS for use inside
* parallel loops
*
*
----------------------------------------------------------------------*/
#if defined(ACML440)
#if defined(DOUBLE)
#define acmlcc1fft zfft1dx
#else
#define acmlcc1fft cfft1dx
#endif
#endif
/*
 * Complex-to-complex in-place FFT dispatcher. Depending on the build it
 * forwards to the SGI/Cray SCS library, to ACML, or to the portable
 * cc1_fft() fallback. The library paths cache twiddle/work buffers in
 * static variables keyed on the previous transform length, so they are
 * re-initialized only when n changes.
 */
void cc1fft(complex *data, int n, int sign)
{
#if defined(HAVE_LIBSCS)
    int ntable, nwork, zero=0;
    static int isys, nprev[MAX_NUMTHREADS];
    static float *work[MAX_NUMTHREADS], *table[MAX_NUMTHREADS], scale=1.0;
    int pe, i;
#elif defined(ACML440)
    static int nprev=0;
    int nwork, zero=0, one=1, inpl=1, i;
    static int isys;
    static complex *work;
    REAL scl;
    complex *y;
#endif

#if defined(HAVE_LIBSCS)
    /* Each thread keeps its own table/work buffers, indexed by thread id. */
    pe = mp_my_threadnum();
    assert ( pe <= MAX_NUMTHREADS );
    if (n != nprev[pe]) {
        isys   = 0;
        ntable = 2*n + 30;
        nwork  = 2*n;
        /* allocate memory on each processor locally for speed */
        if (work[pe]) free(work[pe]);
        work[pe] = (float *)malloc(nwork*sizeof(float));
        if (work[pe] == NULL)
            fprintf(stderr,"cc1fft: memory allocation error\n");
        if (table[pe]) free(table[pe]);
        table[pe] = (float *)malloc(ntable*sizeof(float));
        if (table[pe] == NULL)
            fprintf(stderr,"cc1fft: memory allocation error\n");
        /* isign=0 call only initializes the twiddle table */
        ccfft_(&zero, &n, &scale, data, data, table[pe], work[pe], &isys);
        nprev[pe] = n;
    }
    ccfft_(&sign, &n, &scale, data, data, table[pe], work[pe], &isys);
#elif defined(ACML440)
    /* NOTE(review): unlike the SCS path, this branch keeps a single static
     * work buffer, so concurrent calls from multiple threads would race --
     * confirm callers serialize if ACML builds are used with threads. */
    scl = 1.0;
    if (n != nprev) {
        isys   = 0;
        nwork  = 5*n + 100;
        if (work) free(work);
        work = (complex *)malloc(nwork*sizeof(complex));
        if (work == NULL) fprintf(stderr,"rc1fft: memory allocation error\n");
        /* mode=0 call initializes the internal tables */
        acmlcc1fft(zero, scl, inpl, n, data, 1, y, 1, work, &isys);
        nprev = n;
    }
    acmlcc1fft(sign, scl, inpl, n, data, 1, y, 1, work, &isys);
#else
    /* portable fallback (Mayer/SU based in-place FFT) */
    cc1_fft(data, n, sign);
#endif

    return;
}
/****************** NO COMPLEX DEFINED ******************/
void Rcc1fft(float *data, int n, int sign)
{
cc1fft((complex *)data, n , sign);
return;
}
/****************** FORTRAN SHELL *****************/
/*
 * Fortran-callable shell: Fortran passes all arguments by reference, so
 * the scalars are dereferenced before calling the C entry point.
 */
void cc1fft_(complex *data, int *n, int *sign)
{
	int nsamples    = *n;
	int kernel_sign = *sign;

	cc1fft(data, nsamples, kernel_sign);
}
| sun031/Jan | FFTlib/cc1fft.c | C | epl-1.0 | 3,110 |
/*
* This file contains pieces of the Linux TCP/IP stack needed for modular
* TOE support.
*
* Copyright (C) 2006-2009 Chelsio Communications. All rights reserved.
* See the corresponding files in the Linux tree for copyrights of the
* original Linux code a lot of this file is based on.
*
* Written by Dimitris Michailidis (dm@chelsio.com)
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
/* The following tags are used by the out-of-kernel Makefile to identify
* supported kernel versions if a module_support-<kver> file is not found.
* Do not remove these tags.
* $SUPPORTED KERNEL 2.6.23$
* $SUPPORTED KERNEL 2.6.24$
* $SUPPORTED KERNEL 2.6.25$
* $SUPPORTED KERNEL 2.6.26$
* $SUPPORTED KERNEL 2.6.27$
* $SUPPORTED KERNEL 2.6.28$
* $SUPPORTED KERNEL 2.6.29$
* $SUPPORTED KERNEL 2.6.30$
* $SUPPORTED KERNEL 2.6.31$
* $SUPPORTED KERNEL 2.6.32$
* $SUPPORTED KERNEL 2.6.33$
* $SUPPORTED KERNEL 2.6.34$
* $SUPPORTED KERNEL 2.6.35$
* $SUPPORTED KERNEL 2.6.36$
* $SUPPORTED KERNEL 2.6.37$
*/
#include <net/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/kprobes.h>
#include "defs.h"
#include <asm/tlbflush.h>
#if defined(CONFIG_SMP) && !defined(PPC64_TLB_BATCH_NR)
static unsigned long (*kallsyms_lookup_name_p)(const char *name);
static void (*flush_tlb_mm_p)(struct mm_struct *mm);
static void (*flush_tlb_page_p)(struct vm_area_struct *vma,
unsigned long va);
void flush_tlb_mm_offload(struct mm_struct *mm);
#endif
/*
 * Flush the TLB entry for one page via the kernel symbol resolved in
 * prepare_tom_for_offload().  No-op on !SMP or PPC64_TLB_BATCH_NR builds.
 *
 * Fix: guard against a NULL function pointer, consistent with
 * flush_tlb_mm_offload() below -- otherwise calling this before
 * prepare_tom_for_offload() has resolved the symbol dereferences NULL.
 */
void flush_tlb_page_offload(struct vm_area_struct *vma, unsigned long addr)
{
#if defined(CONFIG_SMP) && !defined(PPC64_TLB_BATCH_NR)
	if (flush_tlb_page_p)
		flush_tlb_page_p(vma, addr);
#endif
}
/* Module-local copies of TCP sysctls referenced by the offloaded stack. */
int sysctl_tcp_window_scaling = 1;
int sysctl_tcp_adv_win_scale = 2;

/* Odd-indexed entries correspond to TOS values whose low (cost/ECN) bit
 * is set; the macro only keeps the table layout readable. */
#define ECN_OR_COST(class) TC_PRIO_##class

/* Map of the 4-bit IP TOS field to a packet-scheduler priority band. */
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
/*
* Adapted from tcp_minisocks.c
*/
/*
 * Move socket 'sk' into TIME_WAIT.  If the global time-wait bucket limit
 * allows, a lightweight inet_timewait_sock is allocated, seeded with the
 * sequence/window/timestamp state the peer may still reference, hashed
 * into the table in place of the full socket, and armed with a timer.
 * The full socket is then torn down with tcp_done() in either case.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Always 0 in this copy: the tw_recycle support of the original
	 * kernel function is compiled out, so the recycle branch below
	 * is effectively dead. */
	int recycle_ok = 0;

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		/* (rto << 2) - (rto >> 1) == 3.5 * RTO */
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		/* Snapshot the state a late-arriving peer segment may hit. */
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing: never below 3.5*RTO,
		 * and the full TCP_TIMEWAIT_LEN for real TIME_WAIT state. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);	/* drop the alloc reference */
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO
			       "TCP: time wait bucket table overflow\n");
	}

	tcp_done(sk);
}
/*
 * Flush the TLB for a whole mm via the kernel symbol resolved in
 * prepare_tom_for_offload().  Silently does nothing if the symbol has
 * not been resolved yet, or on !SMP / PPC64_TLB_BATCH_NR builds.
 */
void flush_tlb_mm_offload(struct mm_struct *mm)
{
#if defined(CONFIG_SMP) && !defined(PPC64_TLB_BATCH_NR)
	if (!flush_tlb_mm_p)
		return;
	flush_tlb_mm_p(mm);
#endif
}
#if defined(CONFIG_SMP) && !defined(PPC64_TLB_BATCH_NR)
/*
 * Locate the (unexported) kallsyms_lookup_name symbol and store it in
 * kallsyms_lookup_name_p.  Returns 0 on success, non-zero on failure.
 *
 * When kprobes can resolve symbols (KPROBES_KALLSYMS), a probe is
 * registered on the symbol purely to learn its address (kp.addr) and is
 * immediately unregistered; otherwise a build-time constant address
 * (KALLSYMS_LOOKUP, extracted from System.map) is used.
 */
static int find_kallsyms_lookup_name(void)
{
	int err = 0;
#if defined(KPROBES_KALLSYMS)
	struct kprobe kp;

	memset(&kp, 0, sizeof kp);
	kp.symbol_name = "kallsyms_lookup_name";
	err = register_kprobe(&kp);
	if (!err) {
		kallsyms_lookup_name_p = (void *)kp.addr;
		unregister_kprobe(&kp);
	}
#else
	kallsyms_lookup_name_p = (void *)KALLSYMS_LOOKUP;
#endif
	/* Even a "successful" lookup may yield NULL; report that as error. */
	if (!err)
		err = kallsyms_lookup_name_p == NULL;
	return err;
}
#endif
/*
 * Resolve the unexported TLB-flush symbols needed by the offload module.
 * Returns 0 on success, non-zero/-1 on failure.  Must run before
 * flush_tlb_mm_offload()/flush_tlb_page_offload() are used.
 *
 * Fix: printk messages were missing the trailing "\n" required by the
 * kernel log convention (un-terminated messages are held back or merged
 * with the next printk).
 */
int prepare_tom_for_offload(void)
{
#if defined(CONFIG_SMP) && !defined(PPC64_TLB_BATCH_NR)
	if (!kallsyms_lookup_name_p) {
		int err = find_kallsyms_lookup_name();
		if (err)
			return err;
	}

	flush_tlb_mm_p = (void *)kallsyms_lookup_name_p("flush_tlb_mm");
	if (!flush_tlb_mm_p) {
		printk(KERN_ERR "Could not locate flush_tlb_mm\n");
		return -1;
	}
	flush_tlb_page_p = (void *)kallsyms_lookup_name_p("flush_tlb_page");
	if (!flush_tlb_page_p) {
		printk(KERN_ERR "Could not locate flush_tlb_page\n");
		return -1;
	}
#endif
	return 0;
}
| nal-epfl/line-sigcomm14 | PF_RING-5.6.2/drivers/PF_RING_aware/chelsio/cxgb3-2.0.0.1/src/t3_tom/module_support/module_support-tom-2.6.23.c | C | gpl-2.0 | 5,114 |
/*
* Synopsys DesignWare I2C adapter driver (master only).
*
* Partly based on code of similar driver from U-Boot:
* Copyright (C) 2009 ST Micoelectronics
*
* and corresponding code from Linux Kernel
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*
* Copyright (C) 2015 Andrey Smirnov <andrew.smirnov@gmail.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <clock.h>
#include <common.h>
#include <driver.h>
#include <init.h>
#include <of.h>
#include <malloc.h>
#include <types.h>
#include <xfuncs.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/math64.h>
#include <io.h>
#include <i2c/i2c.h>
#define DW_I2C_BIT_RATE 100000
#define DW_IC_CON 0x0
#define DW_IC_CON_MASTER (1 << 0)
#define DW_IC_CON_SPEED_STD (1 << 1)
#define DW_IC_CON_SPEED_FAST (1 << 2)
#define DW_IC_CON_SLAVE_DISABLE (1 << 6)
#define DW_IC_TAR 0x4
#define DW_IC_DATA_CMD 0x10
#define DW_IC_DATA_CMD_CMD (1 << 8)
#define DW_IC_DATA_CMD_STOP (1 << 9)
#define DW_IC_SS_SCL_HCNT 0x14
#define DW_IC_SS_SCL_LCNT 0x18
#define DW_IC_FS_SCL_HCNT 0x1c
#define DW_IC_FS_SCL_LCNT 0x20
#define DW_IC_INTR_MASK 0x30
#define DW_IC_RAW_INTR_STAT 0x34
#define DW_IC_INTR_RX_UNDER (1 << 0)
#define DW_IC_INTR_RX_OVER (1 << 1)
#define DW_IC_INTR_RX_FULL (1 << 2)
#define DW_IC_INTR_TX_OVER (1 << 3)
#define DW_IC_INTR_TX_EMPTY (1 << 4)
#define DW_IC_INTR_RD_REQ (1 << 5)
#define DW_IC_INTR_TX_ABRT (1 << 6)
#define DW_IC_INTR_RX_DONE (1 << 7)
#define DW_IC_INTR_ACTIVITY (1 << 8)
#define DW_IC_INTR_STOP_DET (1 << 9)
#define DW_IC_INTR_START_DET (1 << 10)
#define DW_IC_INTR_GEN_CALL (1 << 11)
#define DW_IC_RX_TL 0x38
#define DW_IC_TX_TL 0x3c
#define DW_IC_CLR_INTR 0x40
#define DW_IC_CLR_TX_ABRT 0x54
#define DW_IC_SDA_HOLD 0x7c
#define DW_IC_ENABLE 0x6c
#define DW_IC_ENABLE_ENABLE (1 << 0)
#define DW_IC_STATUS 0x70
#define DW_IC_STATUS_TFNF (1 << 1)
#define DW_IC_STATUS_TFE (1 << 2)
#define DW_IC_STATUS_RFNE (1 << 3)
#define DW_IC_STATUS_MST_ACTIVITY (1 << 5)
#define DW_IC_TX_ABRT_SOURCE 0x80
#define DW_IC_ENABLE_STATUS 0x9c
#define DW_IC_ENABLE_STATUS_IC_EN (1 << 0)
#define DW_IC_COMP_VERSION 0xf8
#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A
#define DW_IC_COMP_TYPE 0xfc
#define DW_IC_COMP_TYPE_VALUE 0x44570140
#define MAX_T_POLL_COUNT 100
#define DW_TIMEOUT_IDLE (40 * MSECOND)
#define DW_TIMEOUT_TX (2 * MSECOND)
#define DW_TIMEOUT_RX (2 * MSECOND)
#define DW_IC_SDA_HOLD_RX_SHIFT 16
#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
struct dw_i2c_dev {
void __iomem *base;
struct clk *clk;
struct i2c_adapter adapter;
u32 sda_hold_time;
};
/* Recover the driver state from the embedded i2c_adapter pointer that
 * the core hands back in callbacks. */
static inline struct dw_i2c_dev *to_dw_i2c_dev(struct i2c_adapter *a)
{
	return container_of(a, struct dw_i2c_dev, adapter);
}
/*
 * Enable or disable the I2C controller, polling IC_ENABLE_STATUS until
 * the hardware confirms the new state or ~25 ms elapse (101 polls of
 * 250 us).  Only warns on timeout; callers get no error indication.
 */
static void i2c_dw_enable(struct dw_i2c_dev *dw, bool enable)
{
	u32 reg = 0;
	/*
	 * This subroutine is an implementation of an algorithm
	 * described in "Cyclone V Hard Processor System Technical
	 * Reference Manual" p. 20-19, "Disabling the I2C Controller"
	 */
	int timeout = MAX_T_POLL_COUNT;

	if (enable)
		reg |= DW_IC_ENABLE_ENABLE;

	do {
		uint32_t ic_enable_status;

		writel(reg, dw->base + DW_IC_ENABLE);
		ic_enable_status = readl(dw->base + DW_IC_ENABLE_STATUS);
		/* Comparing the masked bit directly against the bool works
		 * because DW_IC_ENABLE_STATUS_IC_EN is bit 0, so the masked
		 * value is exactly 0 or 1. */
		if ((ic_enable_status & DW_IC_ENABLE_STATUS_IC_EN) == enable)
			return;
		udelay(250);
	} while (timeout--);

	dev_warn(&dw->adapter.dev, "timeout in %sabling adapter\n",
		 enable ? "en" : "dis");
}
/*
* All of the code pertaining to tming calculation is taken from
* analogous driver in Linux kernel
*/
/*
 * Compute the SCL high-period count (IC_[FS]S_SCL_HCNT) for a given
 * controller clock, in clock ticks.
 *
 * ic_clk:  controller input clock in kHz
 * tSYMBOL: tHIGH (and tHD;STA) target in ns
 * tf:      SDA fall time in ns (only used in the conservative mode)
 * cond:    1 = "ideal" DW-manual formula (fastest bus, may violate
 *              tHD;STA on some hardware);
 *          0 = conservative formula that also accounts for the fall
 *              time and empirically satisfies both tHIGH and tHD;STA
 * offset:  additive correction applied to the final count
 *
 * The "+ 500000" term rounds the ns->tick conversion to nearest.
 */
static uint32_t
i2c_dw_scl_hcnt(uint32_t ic_clk, uint32_t tSYMBOL, uint32_t tf, int cond,
		int offset)
{
	uint32_t cnt;

	if (cond) {
		/* Ideal: IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH */
		cnt = (ic_clk * tSYMBOL + 500000) / 1000000 - 8;
	} else {
		/* Conservative: IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf);
		 * the tHD;STA period is empirically proportional to HCNT + 3. */
		cnt = (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 - 3;
	}

	return cnt + offset;
}
/*
 * Compute the SCL low-period count (IC_[FS]S_SCL_LCNT) in controller
 * clock ticks.
 *
 * ic_clk: controller input clock in kHz
 * tLOW:   target SCL low time in ns
 * tf:     SCL fall time in ns -- the controller starts counting tLOW as
 *         soon as it pulls SCL, so the fall time must be added to still
 *         meet the spec (0.3 us is a safe default)
 * offset: additive correction applied to the final count
 *
 * Satisfies: IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf),
 * with round-to-nearest on the ns->tick conversion.
 */
static uint32_t
i2c_dw_scl_lcnt(uint32_t ic_clk, uint32_t tLOW, uint32_t tf, int offset)
{
	uint32_t ticks = (ic_clk * (tLOW + tf) + 500000) / 1000000;

	return ticks - 1 + offset;
}
/*
 * Program the SCL high/low counts for standard (100 kHz) and fast
 * (400 kHz) mode from the input clock rate, and configure SDA hold time
 * on controller versions that support it (>= DW_IC_SDA_HOLD_MIN_VERS).
 * Must be called while the controller is disabled.
 */
static void i2c_dw_setup_timings(struct dw_i2c_dev *dw)
{
	uint32_t hcnt, lcnt;
	u32 reg;
	const uint32_t sda_falling_time = 300;	/* ns */
	const uint32_t scl_falling_time = 300;	/* ns */
	const unsigned int input_clock_khz = clk_get_rate(dw->clk) / 1000;

	/* Set SCL timing parameters for standard-mode */
	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
			       4000,	/* tHD;STA = tHIGH = 4.0 us */
			       sda_falling_time,
			       0,	/* 0: DW default, 1: Ideal */
			       0);	/* No offset */
	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
			       4700,	/* tLOW = 4.7 us */
			       scl_falling_time,
			       0);	/* No offset */
	writel(hcnt, dw->base + DW_IC_SS_SCL_HCNT);
	writel(lcnt, dw->base + DW_IC_SS_SCL_LCNT);

	/* Fast-mode (400 kHz) counts */
	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
			       600,	/* tHD;STA = tHIGH = 0.6 us */
			       sda_falling_time,
			       0,	/* 0: DW default, 1: Ideal */
			       0);	/* No offset */
	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
			       1300,	/* tLOW = 1.3 us */
			       scl_falling_time,
			       0);	/* No offset */
	writel(hcnt, dw->base + DW_IC_FS_SCL_HCNT);
	writel(lcnt, dw->base + DW_IC_FS_SCL_LCNT);

	/* Configure SDA Hold Time if required */
	reg = readl(dw->base + DW_IC_COMP_VERSION);
	if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
		u32 ht;
		int ret;

		ret = of_property_read_u32(dw->adapter.dev.device_node,
					   "i2c-sda-hold-time-ns", &ht);
		if (ret) {
			/* Keep previous hold time setting if no one set it */
			dw->sda_hold_time = readl(dw->base + DW_IC_SDA_HOLD);
		} else if (ht) {
			/* round-to-nearest ns -> tick conversion */
			dw->sda_hold_time = div_u64((u64)input_clock_khz * ht + 500000,
						    1000000);
		}
		/*
		 * Workaround for avoiding TX arbitration lost in case I2C
		 * slave pulls SDA down "too quickly" after falling edge of
		 * SCL by enabling non-zero SDA RX hold. Specification says it
		 * extends incoming SDA low to high transition while SCL is
		 * high but it appears to help also above issue.
		 */
		if (!(dw->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
			dw->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
		dev_dbg(&dw->adapter.dev, "adjust SDA hold time.\n");
		writel(dw->sda_hold_time, dw->base + DW_IC_SDA_HOLD);
	}
}
/*
 * Poll register 'offset' until (reg & mask) == value or 'timeout'
 * nanoseconds pass.  Returns 0 on match, -ETIMEDOUT otherwise.
 * The register is always sampled at least once, even if the timeout
 * has already expired.
 */
static int i2c_dw_wait_for_bits(struct dw_i2c_dev *dw, uint32_t offset,
				uint32_t mask, uint32_t value, uint64_t timeout)
{
	const uint64_t start = get_time_ns();

	for (;;) {
		uint32_t reg = readl(dw->base + offset);

		if ((reg & mask) == value)
			return 0;
		if (is_timeout(start, timeout))
			return -ETIMEDOUT;
	}
}
/* Wait until the master is inactive and the TX FIFO has drained:
 * MST_ACTIVITY clear, TFE set.  0 on success, -ETIMEDOUT otherwise. */
static int i2c_dw_wait_for_idle(struct dw_i2c_dev *dw)
{
	return i2c_dw_wait_for_bits(dw, DW_IC_STATUS,
				    DW_IC_STATUS_MST_ACTIVITY | DW_IC_STATUS_TFE,
				    DW_IC_STATUS_TFE, DW_TIMEOUT_IDLE);
}
/* Wait for room in the TX FIFO (TFNF set). 0 on success, -ETIMEDOUT. */
static int i2c_dw_wait_for_tx_fifo_not_full(struct dw_i2c_dev *dw)
{
	return i2c_dw_wait_for_bits(dw, DW_IC_STATUS,
				    DW_IC_STATUS_TFNF, DW_IC_STATUS_TFNF,
				    DW_TIMEOUT_TX);
}
/* Wait for data in the RX FIFO (RFNE set). 0 on success, -ETIMEDOUT. */
static int i2c_dw_wait_for_rx_fifo_not_empty(struct dw_i2c_dev *dw)
{
	return i2c_dw_wait_for_bits(dw, DW_IC_STATUS,
				    DW_IC_STATUS_RFNE, DW_IC_STATUS_RFNE,
				    DW_TIMEOUT_RX);
}
/* Recover the controller by toggling it off and back on; this is also
 * how aborted transfers are cleaned up below. */
static void i2c_dw_reset(struct dw_i2c_dev *dw)
{
	i2c_dw_enable(dw, false);
	i2c_dw_enable(dw, true);
}

/* Abort a transmit that timed out; currently just a full reset. */
static void i2c_dw_abort_tx(struct dw_i2c_dev *dw)
{
	i2c_dw_reset(dw);
}

/* Abort a receive that timed out; currently just a full reset. */
static void i2c_dw_abort_rx(struct dw_i2c_dev *dw)
{
	i2c_dw_reset(dw);
}
/*
 * Execute one I2C read message: for each byte, queue a READ command in
 * the TX FIFO (the last one carrying a STOP), wait for the byte to land
 * in the RX FIFO, and copy it into msg->buf.
 *
 * Returns msg->len on success or a negative timeout error (after
 * resetting the controller via i2c_dw_abort_rx()).
 */
static int i2c_dw_read(struct dw_i2c_dev *dw,
		       const struct i2c_msg *msg)
{
	int i;

	for (i = 0; i < msg->len; i++) {
		int ret;
		const bool last_byte = i == msg->len - 1;
		uint32_t ic_cmd_data = DW_IC_DATA_CMD_CMD;	/* read command */

		if (last_byte)
			ic_cmd_data |= DW_IC_DATA_CMD_STOP;

		writel(ic_cmd_data, dw->base + DW_IC_DATA_CMD);

		ret = i2c_dw_wait_for_rx_fifo_not_empty(dw);
		if (ret < 0) {
			i2c_dw_abort_rx(dw);
			return ret;
		}
		/* low byte of IC_DATA_CMD holds the received data */
		msg->buf[i] = (uint8_t)readl(dw->base + DW_IC_DATA_CMD);
	}

	return msg->len;
}
/*
 * Execute one I2C write message: push each byte into the TX FIFO (the
 * last one carrying a STOP), bailing out early if the controller has
 * already flagged a TX abort or the FIFO never drains.
 *
 * Returns msg->len on success, -EIO on a detected abort, or a negative
 * timeout error (after resetting the controller via i2c_dw_abort_tx()).
 */
static int i2c_dw_write(struct dw_i2c_dev *dw,
			const struct i2c_msg *msg)
{
	int i;
	uint32_t ic_int_stat;

	for (i = 0; i < msg->len; i++) {
		int ret;
		uint32_t ic_cmd_data;
		const bool last_byte = i == msg->len - 1;

		/* check for an abort raised by previously queued bytes */
		ic_int_stat = readl(dw->base + DW_IC_RAW_INTR_STAT);
		if (ic_int_stat & DW_IC_INTR_TX_ABRT)
			return -EIO;

		ret = i2c_dw_wait_for_tx_fifo_not_full(dw);
		if (ret < 0) {
			i2c_dw_abort_tx(dw);
			return ret;
		}

		ic_cmd_data = msg->buf[i];
		if (last_byte)
			ic_cmd_data |= DW_IC_DATA_CMD_STOP;

		writel(ic_cmd_data, dw->base + DW_IC_DATA_CMD);
	}

	return msg->len;
}
/* Wait for the controller to signal a STOP condition on the bus
 * (STOP_DET in the raw interrupt status).  0 on success, -ETIMEDOUT. */
static int i2c_dw_wait_for_stop(struct dw_i2c_dev *dw)
{
	return i2c_dw_wait_for_bits(dw, DW_IC_RAW_INTR_STAT,
				    DW_IC_INTR_STOP_DET, DW_IC_INTR_STOP_DET,
				    DW_TIMEOUT_IDLE);
}
/*
 * Finish one message: wait for the STOP condition, then for the
 * controller to go idle, and only then inspect the abort status.
 * Returns 0 on success, -EIO if the transfer was aborted, or a negative
 * timeout error.
 */
static int i2c_dw_finish_xfer(struct dw_i2c_dev *dw)
{
	int ret;
	uint32_t ic_int_stat;

	/*
	 * We expect the controller to signal STOP condition on the
	 * bus, so we are going to wait for that first.
	 */
	ret = i2c_dw_wait_for_stop(dw);
	if (ret < 0)
		return ret;

	/*
	 * Now that we know that the stop condition has been signaled
	 * we need to wait for controller to go into IDLE state to
	 * make sure all of the possible error conditions on the bus
	 * have been propagated to appropriate status
	 * registers. Experiment shows that not doing so often results
	 * in false positive "successful" transfers
	 */
	ret = i2c_dw_wait_for_idle(dw);
	if (ret >= 0) {
		ic_int_stat = readl(dw->base + DW_IC_RAW_INTR_STAT);
		if (ic_int_stat & DW_IC_INTR_TX_ABRT)
			return -EIO;
	}

	return ret;
}
/*
 * Program the 7-bit target address into IC_TAR, preserving the upper
 * control bits of the register.  Returns 0 on success or a negative
 * timeout error if the controller never went idle.
 */
static int i2c_dw_set_address(struct dw_i2c_dev *dw, uint8_t address)
{
	int ret;
	uint32_t ic_tar;

	/*
	 * As per "Cyclone V Hard Processor System Technical Reference
	 * Manual" p. 20-19, we have to wait for controller to be in
	 * idle state in order to be able to set the address
	 * dynamically
	 */
	ret = i2c_dw_wait_for_idle(dw);
	if (ret < 0)
		return ret;

	/* replace only the low 10 address bits */
	ic_tar = readl(dw->base + DW_IC_TAR);
	ic_tar &= 0xfffffc00;
	writel(ic_tar | address, dw->base + DW_IC_TAR);

	return 0;
}
/*
 * i2c_adapter.master_xfer callback: execute 'num' messages in order.
 * Returns num on success or a negative error.  On -EIO the abort reason
 * is read (and thereby cleared) from IC_TX_ABRT_SOURCE; on any other
 * error the controller is reset.  I2C_M_DATA_ONLY is not supported.
 */
static int i2c_dw_xfer(struct i2c_adapter *adapter,
		       struct i2c_msg *msgs, int num)
{
	int i, ret = 0;
	struct dw_i2c_dev *dw = to_dw_i2c_dev(adapter);

	for (i = 0; i < num; i++) {
		if (msgs[i].flags & I2C_M_DATA_ONLY)
			return -ENOTSUPP;

		ret = i2c_dw_set_address(dw, msgs[i].addr);
		if (ret < 0)
			break;

		if (msgs[i].flags & I2C_M_RD)
			ret = i2c_dw_read(dw, &msgs[i]);
		else
			ret = i2c_dw_write(dw, &msgs[i]);
		if (ret < 0)
			break;

		ret = i2c_dw_finish_xfer(dw);
		if (ret < 0)
			break;
	}

	if (ret == -EIO) {
		/*
		 * If we got -EIO it means that transfer was for some
		 * reason aborted, so we should figure out the reason
		 * and take steps to clear that condition
		 */
		const uint32_t ic_tx_abrt_source =
			readl(dw->base + DW_IC_TX_ABRT_SOURCE);
		dev_dbg(&dw->adapter.dev,
			"<%s> ic_tx_abrt_source: 0x%04x\n",
			__func__, ic_tx_abrt_source);
		/* reading IC_CLR_TX_ABRT clears the abort condition */
		readl(dw->base + DW_IC_CLR_TX_ABRT);
		return ret;
	}

	if (ret < 0) {
		i2c_dw_reset(dw);
		return ret;
	}

	return num;
}
/*
 * Probe: map the register window, verify the IP identification register,
 * program timings and operating mode (master, slave disabled, polled),
 * and register the adapter with the I2C core.
 * Returns 0/positive on success, negative error otherwise; 'dw' is freed
 * on every failure path.
 */
static int i2c_dw_probe(struct device_d *pdev)
{
	struct resource *iores;
	struct dw_i2c_dev *dw;
	struct i2c_platform_data *pdata;
	int ret, bitrate;
	uint32_t ic_con, ic_comp_type_value;

	pdata = pdev->platform_data;	/* may be NULL */
	dw = xzalloc(sizeof(*dw));
	if (IS_ENABLED(CONFIG_COMMON_CLK)) {
		dw->clk = clk_get(pdev, NULL);
		if (IS_ERR(dw->clk)) {
			ret = PTR_ERR(dw->clk);
			goto fail;
		}
	}

	dw->adapter.master_xfer = i2c_dw_xfer;
	dw->adapter.nr = pdev->id;
	dw->adapter.dev.parent = pdev;
	dw->adapter.dev.device_node = pdev->device_node;

	iores = dev_request_mem_resource(pdev, 0);
	if (IS_ERR(iores)) {
		ret = PTR_ERR(iores);
		goto fail;
	}
	dw->base = IOMEM(iores->start);

	/* sanity check: IC_COMP_TYPE must read the DesignWare magic */
	ic_comp_type_value = readl(dw->base + DW_IC_COMP_TYPE);
	if (ic_comp_type_value != DW_IC_COMP_TYPE_VALUE) {
		dev_err(pdev,
			"unknown DesignWare IP block 0x%08x",
			ic_comp_type_value);
		ret = -ENODEV;
		goto fail;
	}

	/* controller must be disabled while timings are programmed */
	i2c_dw_enable(dw, false);

	if (IS_ENABLED(CONFIG_COMMON_CLK))
		i2c_dw_setup_timings(dw);

	bitrate = (pdata && pdata->bitrate) ? pdata->bitrate : DW_I2C_BIT_RATE;

	/*
	 * We have to clear 'ic_10bitaddr_master' in 'ic_tar'
	 * register, otherwise 'ic_10bitaddr_master' in 'ic_con'
	 * wouldn't clear. We don't care about preserving the contents
	 * of that register so we set it to zero.
	 */
	writel(0, dw->base + DW_IC_TAR);

	switch (bitrate) {
	case 400000:
		ic_con = DW_IC_CON_SPEED_FAST;
		break;
	default:
		/* unsupported rates deliberately fall through to 100 kHz */
		dev_warn(pdev, "requested bitrate (%d) is not supported."
			 " Falling back to 100kHz", bitrate);
	case 100000:		/* FALLTHROUGH */
		ic_con = DW_IC_CON_SPEED_STD;
		break;
	}

	ic_con |= DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE;
	writel(ic_con, dw->base + DW_IC_CON);

	/*
	 * Since we will be working in polling mode set both
	 * thresholds to their minimum
	 */
	writel(0, dw->base + DW_IC_RX_TL);
	writel(0, dw->base + DW_IC_TX_TL);

	/* Disable and clear all interrupts */
	writel(0, dw->base + DW_IC_INTR_MASK);
	readl(dw->base + DW_IC_CLR_INTR);

	i2c_dw_enable(dw, true);

	ret = i2c_add_numbered_adapter(&dw->adapter);
fail:
	if (ret < 0)
		kfree(dw);

	return ret;
}
static __maybe_unused struct of_device_id i2c_dw_dt_ids[] = {
{ .compatible = "snps,designware-i2c", },
{ /* sentinel */ }
};
static struct driver_d i2c_dw_driver = {
.probe = i2c_dw_probe,
.name = "i2c-designware",
.of_compatible = DRV_OF_COMPAT(i2c_dw_dt_ids),
};
coredevice_platform_driver(i2c_dw_driver);
| masahir0y/barebox-yamada | drivers/i2c/busses/i2c-designware.c | C | gpl-2.0 | 15,486 |
/* Name: usbdrv.c
* Project: AVR USB driver
* Author: Christian Starkjohann
* Creation Date: 2004-12-29
* Tabsize: 4
* Copyright: (c) 2005 by OBJECTIVE DEVELOPMENT Software GmbH
* License: GNU GPL v2 (see License.txt) or proprietary (CommercialLicense.txt)
* This Revision: $Id: usbdrv.c,v 1.1.1.1 2008-01-22 20:28:25 raph Exp $
*/
#include "iarcompat.h"
#ifndef __IAR_SYSTEMS_ICC__
# include <avr/io.h>
# include <avr/pgmspace.h>
#endif
#include "usbdrv.h"
#include "oddebug.h"
/*
General Description:
This module implements the C-part of the USB driver. See usbdrv.h for a
documentation of the entire driver.
*/
#ifndef IAR_SECTION
#define IAR_SECTION(arg)
#define __no_init
#endif
/* The macro IAR_SECTION is a hack to allow IAR-cc compatibility. On gcc, it
* is defined to nothing. __no_init is required on IAR.
*/
/* ------------------------------------------------------------------------- */
/* raw USB registers / interface to assembler code: */
uchar usbRxBuf[2*USB_BUFSIZE]; /* raw RX buffer: PID, 8 bytes data, 2 bytes CRC */
uchar usbInputBufOffset; /* offset in usbRxBuf used for low level receiving */
uchar usbDeviceAddr; /* assigned during enumeration, defaults to 0 */
uchar usbNewDeviceAddr; /* device ID which should be set after status phase */
uchar usbConfiguration; /* currently selected configuration. Administered by driver, but not used */
volatile schar usbRxLen; /* = 0; number of bytes in usbRxBuf; 0 means free, -1 for flow control */
uchar usbCurrentTok; /* last token received, if more than 1 rx endpoint: MSb=endpoint */
uchar usbRxToken; /* token for data we received; if more than 1 rx endpoint: MSb=endpoint */
uchar usbMsgLen = 0xff; /* remaining number of bytes, no msg to send if -1 (see usbMsgPtr) */
volatile uchar usbTxLen = USBPID_NAK; /* number of bytes to transmit with next IN token or handshake token */
uchar usbTxBuf[USB_BUFSIZE];/* data to transmit with next IN, free if usbTxLen contains handshake token */
# if USB_COUNT_SOF
volatile uchar usbSofCount; /* incremented by assembler module every SOF */
# endif
#if USB_CFG_HAVE_INTRIN_ENDPOINT
volatile uchar usbTxLen1 = USBPID_NAK; /* TX count for endpoint 1 */
uchar usbTxBuf1[USB_BUFSIZE]; /* TX data for endpoint 1 */
#if USB_CFG_HAVE_INTRIN_ENDPOINT3
volatile uchar usbTxLen3 = USBPID_NAK; /* TX count for endpoint 3 */
uchar usbTxBuf3[USB_BUFSIZE]; /* TX data for endpoint 3 */
#endif
#endif
/* USB status registers / not shared with asm code */
uchar *usbMsgPtr; /* data to transmit next -- ROM or RAM address */
static uchar usbMsgFlags; /* flag values see below */
#define USB_FLG_TX_PACKET (1<<0)
/* Leave free 6 bits after TX_PACKET. This way we can increment usbMsgFlags to toggle TX_PACKET */
#define USB_FLG_MSGPTR_IS_ROM (1<<6)
#define USB_FLG_USE_DEFAULT_RW (1<<7)
/*
optimizing hints:
- do not post/pre inc/dec integer values in operations
- assign value of PRG_RDB() to register variables and don't use side effects in arg
- use narrow scope for variables which should be in X/Y/Z register
- assign char sized expressions to variables to force 8 bit arithmetics
*/
/* ------------------------------------------------------------------------- */
#if USB_CFG_DESCR_PROPS_STRINGS == 0
#if USB_CFG_DESCR_PROPS_STRING_0 == 0
#undef USB_CFG_DESCR_PROPS_STRING_0
#define USB_CFG_DESCR_PROPS_STRING_0 sizeof(usbDescriptorString0)
PROGMEM const char usbDescriptorString0[] = { /* language descriptor */
4, /* sizeof(usbDescriptorString0): length of descriptor in bytes */
3, /* descriptor type */
0x09, 0x04, /* language index (0x0409 = US-English) */
};
#endif
#if USB_CFG_DESCR_PROPS_STRING_VENDOR == 0 && USB_CFG_VENDOR_NAME_LEN
#undef USB_CFG_DESCR_PROPS_STRING_VENDOR
#define USB_CFG_DESCR_PROPS_STRING_VENDOR sizeof(usbDescriptorStringVendor)
PROGMEM const int usbDescriptorStringVendor[] = {
USB_STRING_DESCRIPTOR_HEADER(USB_CFG_VENDOR_NAME_LEN),
USB_CFG_VENDOR_NAME
};
#endif
#if USB_CFG_DESCR_PROPS_STRING_PRODUCT == 0 && USB_CFG_DEVICE_NAME_LEN
#undef USB_CFG_DESCR_PROPS_STRING_PRODUCT
#define USB_CFG_DESCR_PROPS_STRING_PRODUCT sizeof(usbDescriptorStringDevice)
PROGMEM const int usbDescriptorStringDevice[] = {
USB_STRING_DESCRIPTOR_HEADER(USB_CFG_DEVICE_NAME_LEN),
USB_CFG_DEVICE_NAME
};
#endif
#if USB_CFG_DESCR_PROPS_STRING_SERIAL_NUMBER == 0 && USB_CFG_SERIAL_NUMBER_LEN
#undef USB_CFG_DESCR_PROPS_STRING_SERIAL_NUMBER
#define USB_CFG_DESCR_PROPS_STRING_SERIAL_NUMBER sizeof(usbDescriptorStringSerialNumber)
PROGMEM int usbDescriptorStringSerialNumber[] = {
USB_STRING_DESCRIPTOR_HEADER(USB_CFG_SERIAL_NUMBER_LEN),
USB_CFG_SERIAL_NUMBER
};
#endif
#endif /* USB_CFG_DESCR_PROPS_STRINGS == 0 */
#if USB_CFG_DESCR_PROPS_DEVICE == 0
#undef USB_CFG_DESCR_PROPS_DEVICE
#define USB_CFG_DESCR_PROPS_DEVICE sizeof(usbDescriptorDevice)
PROGMEM char usbDescriptorDevice[] = { /* USB device descriptor */
18, /* sizeof(usbDescriptorDevice): length of descriptor in bytes */
USBDESCR_DEVICE, /* descriptor type */
0x10, 0x01, /* USB version supported */
USB_CFG_DEVICE_CLASS,
USB_CFG_DEVICE_SUBCLASS,
0, /* protocol */
8, /* max packet size */
USB_CFG_VENDOR_ID, /* 2 bytes */
USB_CFG_DEVICE_ID, /* 2 bytes */
USB_CFG_DEVICE_VERSION, /* 2 bytes */
USB_CFG_DESCR_PROPS_STRING_VENDOR != 0 ? 1 : 0, /* manufacturer string index */
USB_CFG_DESCR_PROPS_STRING_PRODUCT != 0 ? 2 : 0, /* product string index */
USB_CFG_DESCR_PROPS_STRING_SERIAL_NUMBER != 0 ? 3 : 0, /* serial number string index */
1, /* number of configurations */
};
#endif
#if USB_CFG_DESCR_PROPS_HID_REPORT != 0 && USB_CFG_DESCR_PROPS_HID == 0
#undef USB_CFG_DESCR_PROPS_HID
#define USB_CFG_DESCR_PROPS_HID 9 /* length of HID descriptor in config descriptor below */
#endif
#if USB_CFG_DESCR_PROPS_CONFIGURATION == 0
#undef USB_CFG_DESCR_PROPS_CONFIGURATION
#define USB_CFG_DESCR_PROPS_CONFIGURATION sizeof(usbDescriptorConfiguration)
PROGMEM char usbDescriptorConfiguration[] = { /* USB configuration descriptor */
9, /* sizeof(usbDescriptorConfiguration): length of descriptor in bytes */
USBDESCR_CONFIG, /* descriptor type */
18 + 7 * USB_CFG_HAVE_INTRIN_ENDPOINT + (USB_CFG_DESCR_PROPS_HID & 0xff), 0,
/* total length of data returned (including inlined descriptors) */
1, /* number of interfaces in this configuration */
1, /* index of this configuration */
0, /* configuration name string index */
#if USB_CFG_IS_SELF_POWERED
USBATTR_SELFPOWER, /* attributes */
#else
USBATTR_BUSPOWER, /* attributes */
#endif
USB_CFG_MAX_BUS_POWER/2, /* max USB current in 2mA units */
/* interface descriptor follows inline: */
9, /* sizeof(usbDescrInterface): length of descriptor in bytes */
USBDESCR_INTERFACE, /* descriptor type */
0, /* index of this interface */
0, /* alternate setting for this interface */
USB_CFG_HAVE_INTRIN_ENDPOINT, /* endpoints excl 0: number of endpoint descriptors to follow */
USB_CFG_INTERFACE_CLASS,
USB_CFG_INTERFACE_SUBCLASS,
USB_CFG_INTERFACE_PROTOCOL,
0, /* string index for interface */
#if (USB_CFG_DESCR_PROPS_HID & 0xff) /* HID descriptor */
9, /* sizeof(usbDescrHID): length of descriptor in bytes */
USBDESCR_HID, /* descriptor type: HID */
0x01, 0x01, /* BCD representation of HID version */
0x00, /* target country code */
0x01, /* number of HID Report (or other HID class) Descriptor infos to follow */
0x22, /* descriptor type: report */
USB_CFG_HID_REPORT_DESCRIPTOR_LENGTH, 0, /* total length of report descriptor */
#endif
#if USB_CFG_HAVE_INTRIN_ENDPOINT /* endpoint descriptor for endpoint 1 */
7, /* sizeof(usbDescrEndpoint) */
USBDESCR_ENDPOINT, /* descriptor type = endpoint */
0x81, /* IN endpoint number 1 */
0x03, /* attrib: Interrupt endpoint */
8, 0, /* maximum packet size */
USB_CFG_INTR_POLL_INTERVAL, /* in ms */
#endif
};
#endif
/* We don't use prog_int or prog_int16_t for compatibility with various libc
* versions. Here's an other compatibility hack:
*/
#ifndef PRG_RDB
#define PRG_RDB(addr) pgm_read_byte(addr)
#endif
typedef union{
unsigned word;
uchar *ptr;
uchar bytes[2];
}converter_t;
/* We use this union to do type conversions. This is better optimized than
* type casts in gcc 3.4.3 and much better than using bit shifts to build
* ints from chars. Byte ordering is not a problem on an 8 bit platform.
*/
/* ------------------------------------------------------------------------- */
#if USB_CFG_HAVE_INTRIN_ENDPOINT
/*
 * Queue up to 8 bytes for the next IN token on interrupt endpoint 1.
 * Copies 'data' into usbTxBuf1 (after the PID byte), appends the CRC,
 * and publishes the length in usbTxLen1, which the assembler RX/TX code
 * consumes.  Caller must ensure len <= 8 (no runtime check, see below).
 */
USB_PUBLIC void usbSetInterrupt(uchar *data, uchar len)
{
uchar       *p, i;

#if USB_CFG_IMPLEMENT_HALT
    /* endpoint halted: drop the data until the halt is cleared */
    if(usbTxLen1 == USBPID_STALL)
        return;
#endif
#if 0   /* No runtime checks! Caller is responsible for valid data! */
    if(len > 8) /* interrupt transfers are limited to 8 bytes */
        len = 8;
#endif
    /* Bit 4 distinguishes a real byte count (buffer consumed/empty) from
     * the USBPID_NAK/STALL handshake values stored in usbTxLen1. */
    if(usbTxLen1 & 0x10){   /* packet buffer was empty */
        usbTxBuf1[0] ^= USBPID_DATA0 ^ USBPID_DATA1;    /* toggle token */
    }else{
        usbTxLen1 = USBPID_NAK; /* avoid sending outdated (overwritten) interrupt data */
    }
    p = usbTxBuf1 + 1;  /* data starts after the PID byte */
    for(i=len;i--;)
        *p++ = *data++;
    usbCrc16Append(&usbTxBuf1[1], len);
    usbTxLen1 = len + 4;    /* len must be given including sync byte */
    DBG2(0x21, usbTxBuf1, len + 3);
}
#endif
#if USB_CFG_HAVE_INTRIN_ENDPOINT3
/*
 * Same as usbSetInterrupt(), but for interrupt endpoint 3
 * (usbTxBuf3/usbTxLen3).  Note: unlike endpoint 1, there is no halt
 * check here.  Caller must ensure len <= 8.
 */
USB_PUBLIC void usbSetInterrupt3(uchar *data, uchar len)
{
uchar       *p, i;

    if(usbTxLen3 & 0x10){   /* packet buffer was empty */
        usbTxBuf3[0] ^= USBPID_DATA0 ^ USBPID_DATA1;    /* toggle token */
    }else{
        usbTxLen3 = USBPID_NAK; /* avoid sending outdated (overwritten) interrupt data */
    }
    p = usbTxBuf3 + 1;  /* data starts after the PID byte */
    for(i=len;i--;)
        *p++ = *data++;
    usbCrc16Append(&usbTxBuf3[1], len);
    usbTxLen3 = len + 4;    /* len must be given including sync byte */
    DBG2(0x23, usbTxBuf3, len + 3);
}
#endif
/*
 * Fetch the next 'len' bytes of the current control-IN reply into
 * 'data'.  By default the bytes come from usbMsgPtr -- read with
 * PRG_RDB() when USB_FLG_MSGPTR_IS_ROM says the message lives in flash,
 * plain pointer reads otherwise -- and usbMsgPtr is advanced.  When
 * USB_CFG_IMPLEMENT_FN_READ is enabled and the default path was not
 * selected for this request, the application's usbFunctionRead()
 * supplies the data instead.  Returns the number of bytes provided.
 *
 * Note the preprocessor-braided braces: the '{' of the if() at the top
 * is closed by the '}else{' in the second #if block.
 */
static uchar usbRead(uchar *data, uchar len)
{
#if USB_CFG_IMPLEMENT_FN_READ
    if(usbMsgFlags & USB_FLG_USE_DEFAULT_RW){
#endif
        uchar   i = len, *r = usbMsgPtr;
        if(usbMsgFlags & USB_FLG_MSGPTR_IS_ROM){    /* ROM data */
            while(i--){
                uchar c = PRG_RDB(r);    /* assign to char size variable to enforce byte ops */
                *data++ = c;
                r++;
            }
        }else{  /* RAM data */
            while(i--)
                *data++ = *r++;
        }
        usbMsgPtr = r;
        return len;
#if USB_CFG_IMPLEMENT_FN_READ
    }else{
        if(len != 0)    /* don't bother app with 0 sized reads */
            return usbFunctionRead(data, len);
        return 0;
    }
#endif
}
#define GET_DESCRIPTOR(cfgProp, staticName) \
if(cfgProp){ \
if((cfgProp) & USB_PROP_IS_RAM) \
flags &= ~USB_FLG_MSGPTR_IS_ROM; \
if((cfgProp) & USB_PROP_IS_DYNAMIC){ \
replyLen = usbFunctionDescriptor(rq); \
}else{ \
replyData = (uchar *)(staticName); \
SET_REPLY_LEN((cfgProp) & 0xff); \
} \
}
/* We use if() instead of #if in the macro above because #if can't be used
* in macros and the compiler optimizes constant conditions anyway.
*/
/* Don't make this function static to avoid inlining.
 * The entire function would become too large and exceed the range of
 * relative jumps.
 * 2006-02-25: Either gcc 3.4.3 is better than the gcc used when the comment
 * above was written, or other parts of the code have changed. We now get
 * better results with an inlined function. Test condition: PowerSwitch code.
 */
/* Process one received packet: a SETUP or OUT on endpoint 0, or an OUT on
 * endpoint 1. Standard requests on endpoint 0 are answered inline below;
 * vendor/class requests go to usbFunctionSetup(), OUT data to
 * usbFunctionWrite()/usbFunctionWriteOut(). On return, usbMsgFlags and
 * usbMsgLen describe the pending IN reply (usbMsgLen == 0xff: nothing to
 * send).
 */
static void usbProcessRx(uchar *data, uchar len)
{
usbRequest_t *rq = (void *)data;
uchar replyLen = 0, flags = USB_FLG_USE_DEFAULT_RW;
    /* We use if() cascades because the compare is done byte-wise while switch()
     * is int-based. The if() cascades are therefore more efficient.
     */
    /* usbRxToken can be:
     * 0x2d 00101101 (USBPID_SETUP for endpoint 0)
     * 0xe1 11100001 (USBPID_OUT for endpoint 0)
     * 0xff 11111111 (USBPID_OUT for endpoint 1)
     */
    DBG2(0x10 + ((usbRxToken >> 1) & 3), data, len); /* SETUP0=12; OUT0=10; OUT1=13 */
#ifdef USB_RX_USER_HOOK
    USB_RX_USER_HOOK(data, len)
#endif
#if USB_CFG_IMPLEMENT_FN_WRITEOUT
    if(usbRxToken == 0xff){
        usbFunctionWriteOut(data, len);
        return; /* no reply expected, hence no usbMsgPtr, usbMsgFlags, usbMsgLen set */
    }
#endif
    if(usbRxToken == (uchar)USBPID_SETUP){
        usbTxLen = USBPID_NAK; /* abort pending transmit */
        if(len == 8){ /* Setup size must be always 8 bytes. Ignore otherwise. */
            uchar type = rq->bmRequestType & USBRQ_TYPE_MASK;
            if(type == USBRQ_TYPE_STANDARD){
#define SET_REPLY_LEN(len) replyLen = (len); usbMsgPtr = replyData
                /* This macro ensures that replyLen and usbMsgPtr are always set in the same way.
                 * That allows optimization of common code in if() branches */
                uchar *replyData = usbTxBuf + 9; /* there is 3 bytes free space at the end of the buffer */
                replyData[0] = 0; /* common to USBRQ_GET_STATUS and USBRQ_GET_INTERFACE */
                if(rq->bRequest == USBRQ_GET_STATUS){ /* 0 */
                    uchar __attribute__((__unused__)) recipient = rq->bmRequestType & USBRQ_RCPT_MASK; /* assign arith ops to variables to enforce byte size */
#if USB_CFG_IS_SELF_POWERED
                    if(recipient == USBRQ_RCPT_DEVICE)
                        replyData[0] = USB_CFG_IS_SELF_POWERED;
#endif
#if USB_CFG_HAVE_INTRIN_ENDPOINT && USB_CFG_IMPLEMENT_HALT
                    if(recipient == USBRQ_RCPT_ENDPOINT && rq->wIndex.bytes[0] == 0x81) /* request status for endpoint 1 */
                        replyData[0] = usbTxLen1 == USBPID_STALL;
#endif
                    replyData[1] = 0;
                    SET_REPLY_LEN(2);
                }else if(rq->bRequest == USBRQ_SET_ADDRESS){ /* 5 */
                    /* address takes effect only after the status stage,
                     * hence stored in usbNewDeviceAddr and not applied here */
                    usbNewDeviceAddr = rq->wValue.bytes[0];
                }else if(rq->bRequest == USBRQ_GET_DESCRIPTOR){ /* 6 */
                    flags = USB_FLG_MSGPTR_IS_ROM | USB_FLG_USE_DEFAULT_RW;
                    if(rq->wValue.bytes[1] == USBDESCR_DEVICE){ /* 1 */
                        GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_DEVICE, usbDescriptorDevice)
                    }else if(rq->wValue.bytes[1] == USBDESCR_CONFIG){ /* 2 */
                        GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_CONFIGURATION, usbDescriptorConfiguration)
                    }else if(rq->wValue.bytes[1] == USBDESCR_STRING){ /* 3 */
#if USB_CFG_DESCR_PROPS_STRINGS & USB_PROP_IS_DYNAMIC
                        if(USB_CFG_DESCR_PROPS_STRINGS & USB_PROP_IS_RAM)
                            flags &= ~USB_FLG_MSGPTR_IS_ROM;
                        replyLen = usbFunctionDescriptor(rq);
#else /* USB_CFG_DESCR_PROPS_STRINGS & USB_PROP_IS_DYNAMIC */
                        if(rq->wValue.bytes[0] == 0){ /* descriptor index */
                            GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_STRING_0, usbDescriptorString0)
                        }else if(rq->wValue.bytes[0] == 1){
                            GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_STRING_VENDOR, usbDescriptorStringVendor)
                        }else if(rq->wValue.bytes[0] == 2){
                            GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_STRING_PRODUCT, usbDescriptorStringDevice)
                        }else if(rq->wValue.bytes[0] == 3){
                            GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_STRING_SERIAL_NUMBER, usbDescriptorStringSerialNumber)
                        }else if(USB_CFG_DESCR_PROPS_UNKNOWN & USB_PROP_IS_DYNAMIC){
                            replyLen = usbFunctionDescriptor(rq);
                        }
#endif /* USB_CFG_DESCR_PROPS_STRINGS & USB_PROP_IS_DYNAMIC */
#if USB_CFG_DESCR_PROPS_HID_REPORT /* only support HID descriptors if enabled */
                    }else if(rq->wValue.bytes[1] == USBDESCR_HID){ /* 0x21 */
                        GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_HID, usbDescriptorConfiguration + 18)
                    }else if(rq->wValue.bytes[1] == USBDESCR_HID_REPORT){ /* 0x22 */
                        GET_DESCRIPTOR(USB_CFG_DESCR_PROPS_HID_REPORT, usbDescriptorHidReport)
#endif /* USB_CFG_DESCR_PROPS_HID_REPORT */
                    }else if(USB_CFG_DESCR_PROPS_UNKNOWN & USB_PROP_IS_DYNAMIC){
                        replyLen = usbFunctionDescriptor(rq);
                    }
                }else if(rq->bRequest == USBRQ_GET_CONFIGURATION){ /* 8 */
                    replyData = &usbConfiguration; /* send current configuration value */
                    SET_REPLY_LEN(1);
                }else if(rq->bRequest == USBRQ_SET_CONFIGURATION){ /* 9 */
                    usbConfiguration = rq->wValue.bytes[0];
#if USB_CFG_IMPLEMENT_HALT
                    usbTxLen1 = USBPID_NAK;
#endif
                }else if(rq->bRequest == USBRQ_GET_INTERFACE){ /* 10 */
                    /* replyData[0] was pre-set to 0 above: always interface 0 */
                    SET_REPLY_LEN(1);
#if USB_CFG_HAVE_INTRIN_ENDPOINT
                }else if(rq->bRequest == USBRQ_SET_INTERFACE){ /* 11 */
                    USB_SET_DATATOKEN1(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */
#   if USB_CFG_HAVE_INTRIN_ENDPOINT3
                    USB_SET_DATATOKEN3(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */
#   endif
#   if USB_CFG_IMPLEMENT_HALT
                    usbTxLen1 = USBPID_NAK;
                }else if(rq->bRequest == USBRQ_CLEAR_FEATURE || rq->bRequest == USBRQ_SET_FEATURE){ /* 1|3 */
                    if(rq->wValue.bytes[0] == 0 && rq->wIndex.bytes[0] == 0x81){ /* feature 0 == HALT for endpoint == 1 */
                        usbTxLen1 = rq->bRequest == USBRQ_CLEAR_FEATURE ? USBPID_NAK : USBPID_STALL;
                        USB_SET_DATATOKEN1(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */
#   if USB_CFG_HAVE_INTRIN_ENDPOINT3
                        USB_SET_DATATOKEN3(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */
#   endif
                    }
#   endif
#endif
                }else{
                    /* the following requests can be ignored, send default reply */
                    /* 1: CLEAR_FEATURE, 3: SET_FEATURE, 7: SET_DESCRIPTOR */
                    /* 12: SYNCH_FRAME */
                }
#undef SET_REPLY_LEN
            }else{ /* not a standard request -- must be vendor or class request */
                replyLen = usbFunctionSetup(data);
            }
#if USB_CFG_IMPLEMENT_FN_READ || USB_CFG_IMPLEMENT_FN_WRITE
            if(replyLen == 0xff){ /* use user-supplied read/write function */
                if((rq->bmRequestType & USBRQ_DIR_MASK) == USBRQ_DIR_DEVICE_TO_HOST){
                    replyLen = rq->wLength.bytes[0]; /* IN transfers only */
                }
                flags &= ~USB_FLG_USE_DEFAULT_RW; /* we have no valid msg, use user supplied read/write functions */
            }else /* The 'else' prevents that we limit a replyLen of 0xff to the maximum transfer len. */
#endif
            if(!rq->wLength.bytes[1] && replyLen > rq->wLength.bytes[0]) /* limit length to max */
                replyLen = rq->wLength.bytes[0];
        }
        /* make sure that data packets which are sent as ACK to an OUT transfer are always zero sized */
    }else{ /* DATA packet from out request */
#if USB_CFG_IMPLEMENT_FN_WRITE
        if(!(usbMsgFlags & USB_FLG_USE_DEFAULT_RW)){
            uchar rval = usbFunctionWrite(data, len);
            replyLen = 0xff;
            if(rval == 0xff){ /* an error occurred */
                usbMsgLen = 0xff; /* cancel potentially pending data packet for ACK */
                usbTxLen = USBPID_STALL;
            }else if(rval != 0){ /* This was the final package */
                replyLen = 0; /* answer with a zero-sized data packet */
            }
            flags = 0; /* start with a DATA1 package, stay with user supplied write() function */
        }
#endif
    }
    usbMsgFlags = flags;
    usbMsgLen = replyLen;
}
/* ------------------------------------------------------------------------- */
/* Assemble the next IN data packet for endpoint 0 into usbTxBuf.
 * Takes up to 8 bytes of the pending message via usbRead(), chooses the
 * DATA0/DATA1 token from usbMsgFlags, appends the CRC16 and arms the
 * transmitter. A short (<8 byte) packet terminates the transfer; an
 * over-long read stalls the endpoint.
 */
static void usbBuildTxBlock(void)
{
uchar wantLen, len, txLen, token;
    wantLen = usbMsgLen;
    if(wantLen > 8)
        wantLen = 8;
    usbMsgLen -= wantLen;
    token = USBPID_DATA1;
    if(usbMsgFlags & USB_FLG_TX_PACKET)
        token = USBPID_DATA0;
    /* NOTE(review): the increment appears to flip USB_FLG_TX_PACKET so the
     * token alternates per packet -- assumes it occupies the low bit(s);
     * confirm against the flag definition in the header. */
    usbMsgFlags++;
    len = usbRead(usbTxBuf + 1, wantLen);
    if(len <= 8){ /* valid data packet */
        usbCrc16Append(&usbTxBuf[1], len);
        txLen = len + 4; /* length including sync byte */
        if(len < 8) /* a partial package identifies end of message */
            usbMsgLen = 0xff;
    }else{
        txLen = USBPID_STALL; /* stall the endpoint */
        usbMsgLen = 0xff;
    }
    usbTxBuf[0] = token;
    usbTxLen = txLen;
    DBG2(0x20, usbTxBuf, txLen-1);
}
/* Return non-zero while the bus is NOT in SE0 (both lines low) state.
 * Used by usbPoll() for bus-reset detection.
 * NOTE(review): USBIN/USBMASK are presumably the input port register and
 * the D+/D- pin mask, defined elsewhere -- confirm in the config header.
 */
static inline uchar isNotSE0(void)
{
uchar rval;
    /* We want to do
     * return (USBIN & USBMASK);
     * here, but the compiler does int-expansion acrobatics.
     * We can avoid this by assigning to a char-sized variable.
     */
    rval = USBIN & USBMASK;
    return rval;
}
/* ------------------------------------------------------------------------- */
/* Main polling entry point, to be called regularly by the application.
 * Dispatches a received packet (if any) to usbProcessRx(), builds the next
 * pending transmit packet, and detects a bus reset (SE0 observed on ten
 * consecutive reads), which clears the device address.
 */
USB_PUBLIC void usbPoll(void)
{
schar len;
uchar i;
    if((len = usbRxLen) > 0){
        /* We could check CRC16 here -- but ACK has already been sent anyway. If you
         * need data integrity checks with this driver, check the CRC in your app
         * code and report errors back to the host. Since the ACK was already sent,
         * retries must be handled on application level.
         * unsigned crc = usbCrc16(buffer + 1, usbRxLen - 3);
         */
        usbProcessRx(usbRxBuf + USB_BUFSIZE + 1 - usbInputBufOffset, len - 3);
#if USB_CFG_HAVE_FLOWCONTROL
        if(usbRxLen > 0) /* only mark as available if not inactivated */
            usbRxLen = 0;
#else
        usbRxLen = 0; /* mark rx buffer as available */
#endif
    }
    if(usbTxLen & 0x10){ /* transmit system idle */
        if(usbMsgLen != 0xff){ /* transmit data pending? */
            usbBuildTxBlock();
        }
    }
    /* sample the bus state up to ten times looking for a non-SE0 level */
    for(i = 10; i > 0; i--){
        if(isNotSE0())
            break;
    }
    if(i == 0){ /* RESET condition, called multiple times during reset */
        usbNewDeviceAddr = 0;
        usbDeviceAddr = 0;
#if USB_CFG_IMPLEMENT_HALT
        usbTxLen1 = USBPID_NAK;
#if USB_CFG_HAVE_INTRIN_ENDPOINT3
        usbTxLen3 = USBPID_NAK;
#endif
#endif
        DBG1(0xff, 0, 0);
    }
}
/* ------------------------------------------------------------------------- */
/* One-time driver initialization: configure and enable the USB interrupt
 * (via the USB_INTR_* macros defined elsewhere) and reset data toggling
 * for the interrupt-IN endpoint(s). Must be called before usbPoll().
 */
USB_PUBLIC void usbInit(void)
{
#if USB_INTR_CFG_SET != 0
    USB_INTR_CFG |= USB_INTR_CFG_SET;
#endif
#if USB_INTR_CFG_CLR != 0
    USB_INTR_CFG &= ~(USB_INTR_CFG_CLR);
#endif
    USB_INTR_ENABLE |= (1 << USB_INTR_ENABLE_BIT);
#if USB_CFG_HAVE_INTRIN_ENDPOINT
    USB_SET_DATATOKEN1(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */
#   if USB_CFG_HAVE_INTRIN_ENDPOINT3
    USB_SET_DATATOKEN3(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */
#   endif
#endif
}
/* ------------------------------------------------------------------------- */
| sambrista/9-buttons-arcade-controller | usbdrv/usbdrv.c | C | gpl-2.0 | 23,890 |
/*****************************************************************************
* xa.c : xa file demux module for vlc
*****************************************************************************
* Copyright (C) 2005 Rémi Denis-Courmont
* $Id$
*
* Authors: Rémi Denis-Courmont <rem # videolan.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <assert.h>
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_demux.h>
/*****************************************************************************
 * Module descriptor
 *****************************************************************************/
static int Open ( vlc_object_t * );
static void Close( vlc_object_t * );

vlc_module_begin ()
    set_description( N_("XA demuxer") )
    set_category( CAT_INPUT )
    set_subcategory( SUBCAT_INPUT_DEMUX )
    /* low probe priority: detection below is heuristic only */
    set_capability( "demux", 10 )
    set_callbacks( Open, Close )
vlc_module_end ()

/*****************************************************************************
 * Local prototypes
 *****************************************************************************/
static int Demux ( demux_t * );
static int Control( demux_t *, int i_query, va_list args );
/* Demuxer state, allocated in Open() and released in Close(). */
struct demux_sys_t
{
    es_out_id_t *p_es;              /* the single audio elementary stream */

    int64_t     i_data_offset;      /* byte offset where sample data starts */
    unsigned int i_data_size;       /* data bytes after the header; 0 disables
                                     * the EOF check in Demux() */

    unsigned int i_block_frames;    /* frames read per Demux() call */
    unsigned int i_frame_size;      /* bytes per ADPCM frame (all channels) */
    unsigned int i_bitrate;         /* nominal bitrate, bits per second */

    date_t      pts;                /* presentation clock, counted in samples */
};
/* On-disk EA .XA file header: 4-byte magic followed by a WAVEFORMATEX-like
 * little-endian layout, 24 bytes total (all of which Open() reads). */
typedef struct xa_header_t
{
    char     xa_id[4];          /* "XAI\0" or "XAJ\0" magic */
    uint32_t iSize;             /* size field; converted to a data size with a
                                 * rough 15/56 ratio in Open() (see FIXME) */
    uint16_t wFormatTag;
    uint16_t nChannels;
    uint32_t nSamplesPerSec;
    uint32_t nAvgBytesPerSec;
    uint16_t nBlockAlign;
    uint16_t wBitsPerSample;
} xa_header_t;

/* guard against compiler-inserted padding breaking the on-disk layout */
static_assert(offsetof(xa_header_t, wBitsPerSample) == 22, "Bad padding");
#define FRAME_LENGTH 28 /* samples per frame */
/*****************************************************************************
* Open: check file and initializes structures
*****************************************************************************/
static int Open( vlc_object_t * p_this )
{
    demux_t *p_demux = (demux_t*)p_this;
    const uint8_t *peek;

    /* XA file heuristic: 4-byte magic (including its NUL) and a
     * little-endian format tag of 1 at offset 8 */
    if( vlc_stream_Peek( p_demux->s, &peek, 10 ) < 10 )
        return VLC_EGENERIC;
    if( memcmp( peek, "XAI", 4 ) && memcmp( peek, "XAJ", 4 ) )
        return VLC_EGENERIC;
    if( GetWLE( peek + 8 ) != 1 ) /* format tag */
        return VLC_EGENERIC;

    demux_sys_t *p_sys = malloc( sizeof( demux_sys_t ) );
    if( unlikely( p_sys == NULL ) )
        return VLC_ENOMEM;

    /* read XA header (the whole 24-byte on-disk layout) */
    xa_header_t xa;
    if( vlc_stream_Read( p_demux->s, &xa, 24 ) < 24 )
    {
        free( p_sys );
        return VLC_EGENERIC;
    }

    es_format_t fmt;
    es_format_Init( &fmt, AUDIO_ES, VLC_FOURCC('X','A','J',0) );
    msg_Dbg( p_demux, "assuming EA ADPCM audio codec" );
    fmt.audio.i_rate = GetDWLE( &xa.nSamplesPerSec );
    /* 15 bytes per channel encode one FRAME_LENGTH-sample frame */
    fmt.audio.i_bytes_per_frame = 15 * GetWLE( &xa.nChannels );
    fmt.audio.i_frame_length = FRAME_LENGTH;
    fmt.audio.i_channels = GetWLE ( &xa.nChannels );
    fmt.audio.i_blockalign = fmt.audio.i_bytes_per_frame;
    fmt.audio.i_bitspersample = GetWLE( &xa.wBitsPerSample );
    fmt.i_bitrate = (fmt.audio.i_rate * fmt.audio.i_bytes_per_frame * 8)
                    / fmt.audio.i_frame_length;

    /* data begins right after the header we just consumed */
    p_sys->i_data_offset = vlc_stream_Tell( p_demux->s );
    /* FIXME: better computation */
    p_sys->i_data_size = xa.iSize * 15 / 56;
    /* How many frames per block (1:1 is too CPU intensive) */
    p_sys->i_block_frames = fmt.audio.i_rate / (FRAME_LENGTH * 20) + 1;
    p_sys->i_frame_size = fmt.audio.i_bytes_per_frame;
    p_sys->i_bitrate = fmt.i_bitrate;

    msg_Dbg( p_demux, "fourcc: %4.4s, channels: %d, "
             "freq: %d Hz, bitrate: %dKo/s, blockalign: %d",
             (char *)&fmt.i_codec, fmt.audio.i_channels, fmt.audio.i_rate,
             fmt.i_bitrate / 8192, fmt.audio.i_blockalign );

    /* reject nonsense headers; the decoder expects 16 bits per sample */
    if( fmt.audio.i_rate == 0 || fmt.audio.i_channels == 0
     || fmt.audio.i_bitspersample != 16 )
    {
        free( p_sys );
        return VLC_EGENERIC;
    }

    p_sys->p_es = es_out_Add( p_demux->out, &fmt );

    date_Init( &p_sys->pts, fmt.audio.i_rate, 1 );
    date_Set( &p_sys->pts, VLC_TS_0 );

    p_demux->pf_demux = Demux;
    p_demux->pf_control = Control;
    p_demux->p_sys = p_sys;

    return VLC_SUCCESS;
}
/*****************************************************************************
* Demux: read packet and send them to decoders
*****************************************************************************
* Returns -1 in case of error, 0 in case of EOF, 1 otherwise
*****************************************************************************/
static int Demux( demux_t *p_demux )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    unsigned i_frames = p_sys->i_block_frames;
    int64_t i_offset = vlc_stream_Tell( p_demux->s );

    /* Stop once the declared data area has been fully consumed (EOF). */
    if( p_sys->i_data_size > 0 &&
        i_offset >= p_sys->i_data_offset + p_sys->i_data_size )
        return 0;

    /* Read a batch of whole ADPCM frames in one block. */
    block_t *p_block = vlc_stream_Block( p_demux->s,
                                         p_sys->i_frame_size * i_frames );
    if( p_block == NULL )
    {
        msg_Warn( p_demux, "cannot read data" );
        return 0;
    }

    /* Timestamp the block and advance the sample clock by the number of
     * complete frames actually obtained. */
    i_frames = p_block->i_buffer / p_sys->i_frame_size;
    p_block->i_dts = p_block->i_pts = date_Get( &p_sys->pts );
    es_out_Control( p_demux->out, ES_OUT_SET_PCR, p_block->i_pts );
    es_out_Send( p_demux->out, p_sys->p_es, p_block );
    date_Increment( &p_sys->pts, i_frames * FRAME_LENGTH );
    return 1;
}
/*****************************************************************************
* Close: frees unused data
*****************************************************************************/
/* Release the private state allocated by Open(). */
static void Close ( vlc_object_t * p_this )
{
    demux_t *p_demux = (demux_t *)p_this;

    free( p_demux->p_sys );
}
/*****************************************************************************
* Control:
*****************************************************************************/
/* Delegate all stream queries (position, length, seek, ...) to the
 * generic helper, bounded by the data area computed in Open(). */
static int Control( demux_t *p_demux, int i_query, va_list args )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    int64_t i_end = -1; /* -1: data end unknown */

    if( p_sys->i_data_size )
        i_end = p_sys->i_data_offset + p_sys->i_data_size;

    return demux_vaControlHelper( p_demux->s, p_sys->i_data_offset, i_end,
                                  p_sys->i_bitrate, p_sys->i_frame_size,
                                  i_query, args );
}
| r1k/vlc | modules/demux/xa.c | C | gpl-2.0 | 7,523 |
#include <string.h>
#include <stdlib.h>
#include "libterm.h"
#include "cursor.h"
#include "screen.h"
#include "bitarr.h"
/* Show (visibility != 0) or hide the cursor of screen 'sid' on terminal
 * 'tid'. Records an update only when the state actually changes.
 * Returns 0 on success, -1 on error.
 */
int cursor_visibility(int tid, int sid, char visibility) {
	if(SCR(tid, sid).curs_invisible != !visibility) {
		SCR(tid, sid).curs_invisible = !visibility;

		/* ESRCH from record_update means no listener; not an error here */
		if(!record_update(tid, sid, visibility ? UPD_CURS : UPD_CURS_INVIS)) {
			if(ltm_curerr.err_no == ESRCH) return 0;
			else return -1;
		}
	}

	return 0;
}
/* Move the cursor to absolute position 'num' on the given axis, clamping
 * to the last column/line. Clears the deferred-wrap flag (see
 * cursor_advance()) and records a cursor update unless the position is
 * unchanged. Returns 0 on success, -1 on error.
 */
int cursor_abs_move(int tid, int sid, enum axis axis, ushort num) {
	int ret = 0;
	uint old;

	/* any explicit positioning cancels a pending wrap */
	SCR(tid, sid).curs_prev_not_set = 0;

	switch(axis) {
		case X:
			old = SCR(tid, sid).cursor.x;

			if(num < SCR(tid, sid).cols)
				SCR(tid, sid).cursor.x = num;
			else
				SCR(tid, sid).cursor.x = SCR(tid, sid).cols-1; /* clamp */

			/* no change, nothing to record */
			if(old == SCR(tid, sid).cursor.x)
				return 0;

			break;
		case Y:
			old = SCR(tid, sid).cursor.y;

			if(num < SCR(tid, sid).lines)
				SCR(tid, sid).cursor.y = num;
			else
				SCR(tid, sid).cursor.y = SCR(tid, sid).lines-1; /* clamp */

			if(old == SCR(tid, sid).cursor.y)
				return 0;

			break;
		default:
			LTM_ERR(EINVAL, "Invalid axis", error);
	}

	/* ESRCH: no update listener registered; treat as success */
	if(!record_update(tid, sid, UPD_CURS)) {
		if(ltm_curerr.err_no == ESRCH) return 0;
		else return -1;
	}

error:
	return ret;
}
/* Move the cursor 'num' cells in 'direction' (clamped at the screen edges
 * by cursor_abs_move()). The UP/LEFT cases guard against unsigned
 * underflow before subtracting. Returns 0 on success, -1 on error.
 */
int cursor_rel_move(int tid, int sid, enum direction direction, ushort num) {
	int ret = 0;

	if(!num) return 0; /* nothing to do */

	switch(direction) {
		case UP:
			return cursor_abs_move(tid, sid, Y, num <= SCR(tid, sid).cursor.y ? SCR(tid, sid).cursor.y - num : 0);
		case DOWN:
			return cursor_abs_move(tid, sid, Y, SCR(tid, sid).cursor.y + num);
		case LEFT:
			return cursor_abs_move(tid, sid, X, num <= SCR(tid, sid).cursor.x ? SCR(tid, sid).cursor.x - num : 0);
		case RIGHT:
			return cursor_abs_move(tid, sid, X, SCR(tid, sid).cursor.x + num);
		default:
			LTM_ERR(EINVAL, "Invalid direction", error);
	}

error:
	return ret;
}
int cursor_horiz_tab(int tid, int sid) {
/* don't hardcode 8 here in the future? */
char dist = 8 - (SCR(tid, sid).cursor.x % 8);
return cursor_rel_move(tid, sid, RIGHT, dist);
}
/* Move the cursor down one line; on the bottom line with autoscroll
 * enabled, scroll the screen instead of moving.
 */
int cursor_down(int tid, int sid) {
	char at_bottom = SCR(tid, sid).cursor.y == SCR(tid, sid).lines-1;

	if(at_bottom && SCR(tid, sid).autoscroll)
		return screen_scroll(tid, sid);

	return cursor_rel_move(tid, sid, DOWN, 1);
}
/* Move down one line and mark the new line as NOT a soft-wrap
 * continuation of the previous one.
 */
int cursor_vertical_tab(int tid, int sid) {
	if(cursor_down(tid, sid) == -1)
		return -1;

	bitarr_unset_index(SCR(tid, sid).wrapped, SCR(tid, sid).cursor.y);
	return 0;
}
/* Hard newline: vertical tab followed by a carriage return to column 0. */
int cursor_line_break(int tid, int sid) {
	if(cursor_vertical_tab(tid, sid) == -1 ||
	   cursor_abs_move(tid, sid, X, 0) == -1)
		return -1;

	return 0;
}
/* Soft wrap to the start of the next line, flagging the new line as a
 * continuation (wrapped) line.
 */
int cursor_wrap(int tid, int sid) {
	if(cursor_down(tid, sid) == -1)
		return -1;

	bitarr_set_index(SCR(tid, sid).wrapped, SCR(tid, sid).cursor.y);

	if(cursor_abs_move(tid, sid, X, 0) == -1)
		return -1;
	return 0;
}
/* Advance the cursor one cell after printing a character.
 * Implements deferred wrapping: when the cursor is already on the last
 * column, the first advance only raises curs_prev_not_set and stays put;
 * the next advance performs the actual wrap. NOTE(review): this looks
 * like the usual VT "sticky right margin" behavior -- confirm against the
 * rest of the library.
 */
int cursor_advance(int tid, int sid) {
	if(SCR(tid, sid).cursor.x == SCR(tid, sid).cols-1) {
		if(!SCR(tid, sid).curs_prev_not_set) {
			SCR(tid, sid).curs_prev_not_set = 1;
			return 0;
		}

		return cursor_wrap(tid, sid);
	} else
		return cursor_rel_move(tid, sid, RIGHT, 1);
}
| atrigent/libterm | src/cursor.c | C | gpl-2.0 | 3,083 |
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Florian La Roche, <flla@stud.uni-sb.de>
* Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
* Linus Torvalds, <torvalds@cs.helsinki.fi>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Matthew Dillon, <dillon@apollo.west.oic.com>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Jorge Cwik, <jorge@laser.satlink.net>
*/
/*
* Changes: Pedro Roque : Retransmit queue handled by TCP.
* : Fragmentation on mtu decrease
* : Segment collapse on retransmit
* : AF independence
*
* Linus Torvalds : send_delayed_ack
* David S. Miller : Charge memory using the right skb
* during syn/ack processing.
* David S. Miller : Output engine completely rewritten.
* Andrea Arcangeli: SYNACK carry ts_recent in tsecr.
* Cacophonix Gaul : draft-minshall-nagle-01
* J Hadi Salim : ECN support
*
*/
#define pr_fmt(fmt) "TCP: " fmt
#include <net/mptcp.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume. Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* NOTE(review): 'cnt' is not referenced anywhere in this chunk and adds a
 * generically-named symbol to the global namespace -- looks like leftover
 * debug state; confirm it is unused elsewhere before removing. */
int cnt = 0;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);
/* Account for new data that has been sent to the network. */
/* Advances the send head past 'skb', updates snd_nxt and packets_out, and
 * (re)arms the retransmission timer when this is the first packet in
 * flight or an early-retransmit/loss-probe timer was pending. */
void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}
/* SND.NXT, if window was not shrunk.
* If window has been shrunk, what should we make? It is not clear at all.
* Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
* Anything in between SND.UNA...SND.UNA+SND.WND also can be already
* invalid. OK, let's make this for now:
*/
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	__u32 wnd_end = tcp_wnd_end(tp);

	/* Use SND.NXT unless the window has shrunk below it. */
	return before(wnd_end, tp->snd_nxt) ? wnd_end : tp->snd_nxt;
}
/* Calculate mss to advertise in SYN segment.
* RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
*
* 1. It is independent of path mtu.
* 2. Ideally, it is maximal possible segment size i.e. 65535-40.
* 3. For IPv4 it is reasonable to calculate it from maximal MTU of
* attached devices, because some buggy hosts are confused by
* large MSS.
* 4. We do not make 3, we advertise MSS, calculated from first
* hop device mtu, but allow to raise it to ip_rt_min_advmss.
* This may be overridden via information stored in routing table.
* 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
* probably even Jumbo".
*/
static __u16 tcp_advertise_mss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
if (dst) {
unsigned int metric = dst_metric_advmss(dst);
if (metric < mss) {
mss = metric;
tp->advmss = mss;
}
}
return (__u16)mss;
}
/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
 * This is the first part of cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;	/* time since last send */
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	/* Halve cwnd once per RTO elapsed while idle, but never decay it
	 * below the restart window. */
	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	/* Restart cwnd (RFC2861) when nothing is in flight and the
	 * connection was idle for longer than one RTO. */
	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
		icsk->icsk_ack.pingpong = 1;
}
/* Account for an ACK we sent. */
/* Consume 'pkts' worth of quick-ack quota and cancel the delayed-ACK
 * timer, since this ACK covers it. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice of TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss <= 1460)
		return init_rwnd;

	return max((1460 * init_rwnd) / mss, 2U);
}
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd, const struct sock *sk)
{
	unsigned int space;

	/* MPTCP hook: lets the multipath layer adjust space/clamp for the
	 * meta-socket before the normal computation below. */
	if (tcp_sk(sk)->mpc)
		mptcp_select_initial_window(&__space, window_clamp, sk);

	space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
/* Chose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* The window must never shrink at the meta-level. At the subflow we
	 * have to allow this. Otherwise we may announce a window too large
	 * for the current meta-level sk_rcvbuf.
	 */
	u32 cur_win = tcp_receive_window(tp->mpc ? tcp_sk(mptcp_meta_sk(sk)) : tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}

	/* MPTCP: mirror the chosen window into the meta-socket state */
	if (tp->mpc) {
		mptcp_meta_tp(tp)->rcv_wnd = new_win;
		mptcp_meta_tp(tp)->rcv_wup = mptcp_meta_tp(tp)->rcv_nxt;
	}

	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}
/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;

	/* Request ECN on the SYN only when the sysctl is set to 1. */
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn != 1)
		return;

	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
	tp->ecn_flags = TCP_ECN_OK;
}
/* Set ECE in the SYN-ACK header when the connection request recorded the
 * peer as ECN-capable; the bit is left untouched otherwise. */
static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}
/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			/* a queued CWR is emitted once, on the next data segment */
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		/* keep echoing ECE while congestion-window-reduction is demanded */
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}
/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	u32 end_seq = seq;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	/* A lone control skb counts as a single, non-GSO segment. */
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	/* SYN and FIN each consume one unit of sequence space. */
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		end_seq++;

	TCP_SKB_CB(skb)->seq = seq;
	TCP_SKB_CB(skb)->end_seq = end_seq;
}
/* True while in urgent mode: the urgent pointer (snd_up) differs from the
 * oldest unacknowledged byte (snd_una). */
bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}
/* Bit flags for tcp_out_options.options: which TCP options to emit. */
#define OPTION_SACK_ADVERTISE (1 << 0)
#define OPTION_TS (1 << 1)
#define OPTION_MD5 (1 << 2)
#define OPTION_WSCALE (1 << 3)
#define OPTION_FAST_OPEN_COOKIE (1 << 8)
/* Before adding here - take a look at OPTION_MPTCP in include/net/mptcp.h */
/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 *
 * @ptr points just past the fixed TCP header; every option is emitted as
 * 32-bit big-endian words so the header length stays 4-byte aligned.
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts, struct sk_buff *skb)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		/* Skip 4 words (16 bytes); the digest is filled in later. */
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		/* When both are present, SACK_PERM piggybacks on the
		 * timestamp word instead of getting its own NOP-padded one.
		 */
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			/* Mark SACK_PERM as done so it isn't emitted twice. */
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		/* On a DSACK, report the duplicate block instead of the
		 * regular selective-ACK blocks.
		 */
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		/* DSACK is one-shot: clear it once it has been sent. */
		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		/* Experimental-option encoding of TCP Fast Open. */
		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		/* Pad an odd 2-byte tail with NOPs to keep alignment. */
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}

	if (unlikely(OPTION_MPTCP & opts->options))
		mptcp_options_write(ptr, tp, opts, skb);
}
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 *
 * Fills @opts, sets *@md5 (or NULL), and returns the number of option
 * bytes consumed, never exceeding MAX_TCP_OPTION_SPACE.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out. */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	/* Timestamps are suppressed when MD5 is in use - together they would
	 * leave no room for SACK blocks (see tcp_synack_options).
	 */
	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		/* SACK_PERM shares the timestamp word when TS is present,
		 * so it only costs space on its own.
		 */
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (tp->request_mptcp || tp->mpc)
		mptcp_syn_options(sk, opts, &remaining);

	/* Fast Open cookie goes in only if it still fits. */
	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
/* Set up TCP options for SYN-ACKs.
 *
 * Like tcp_syn_options() but driven by the request sock @req (whose
 * negotiated flags reflect the peer's SYN) instead of the full socket.
 * Returns the number of option bytes that will be used.
 */
static unsigned int tcp_synack_options(struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       struct tcp_md5sig_key **md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		/* Free when riding along with a timestamp option. */
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}
	if (tcp_rsk(req)->saw_mpc)
		mptcp_synack_options(req, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 *
 * @skb may be NULL (used by tcp_current_mss() to size the common case).
 * Returns the option byte count for this segment.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		/* No skb means no send timestamp to report. */
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	if (tp->mpc)
		mptcp_established_options(sk, skb, opts, &size);

	/* SACK blocks fill whatever option space is left. */
	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
		if (remaining < TCPOLEN_SACK_BASE_ALIGNED)
			opts->num_sack_blocks = 0;
		else
			opts->num_sack_blocks =
				min_t(unsigned int, eff_sacks,
				      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
				      TCPOLEN_SACK_PERBLOCK);
		if (opts->num_sack_blocks)
			size += TCPOLEN_SACK_BASE_ALIGNED +
				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
/* TCP SMALL QUEUES (TSQ)
*
* TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev)
* to reduce RTT and bufferbloat.
* We do this using a special skb destructor (tcp_wfree).
*
* Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
* needs to be reallocated in a driver.
* The invariant being skb->truesize substracted from sk->sk_wmem_alloc
*
* Since transmit from skb destructor is forbidden, we use a tasklet
* to process all sockets that eventually need to send more skbs.
* We use one tasklet per cpu, with its own queue of sockets.
*/
/* Per-CPU TSQ state: a tasklet plus the list of sockets it must drain. */
struct tsq_tasklet {
	struct tasklet_struct tasklet;
	struct list_head head; /* queue of tcp sockets */
};
/* One tsq_tasklet instance per CPU (see tcp_tasklet_init()). */
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
/* Push out more queued data, but only in states where transmitting
 * new segments is legal.
 */
static void tcp_tsq_handler(struct sock *sk)
{
	const int sendable = TCPF_ESTABLISHED | TCPF_FIN_WAIT1 |
			     TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK;

	if (!((1 << sk->sk_state) & sendable))
		return;

	tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk, *meta_sk;

	/* Grab the whole queue atomically w.r.t. tcp_wfree()'s irq
	 * context, then work on the private copy with irqs enabled.
	 */
	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		/* MPTCP subflows are locked via their meta socket. */
		meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
		bh_lock_sock(meta_sk);

		if (!sock_owned_by_user(meta_sk)) {
			tcp_tsq_handler(sk);
			if (tp->mpc)
				tcp_tsq_handler(meta_sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);

			/* For MPTCP, we set the tsq-bit on the meta, and the
			 * subflow as we don't know if the limitation happened
			 * while inside mptcp_write_xmit or during tcp_write_xmit.
			 */
			if (tp->mpc) {
				set_bit(TCP_TSQ_DEFERRED, &tcp_sk(meta_sk)->tsq_flags);
				mptcp_tsq_flags(sk);
			}
		}
		bh_unlock_sock(meta_sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		/* Drop the reference taken by tcp_wfree(). */
		sk_free(sk);
	}
}
/* All tsq_flags bits whose work may be deferred to tcp_release_cb(). */
#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED) |	\
			  (1UL << MPTCP_PATH_MANAGER) |		\
			  (1UL << MPTCP_SUB_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.  Runs every piece of work that was
 * deferred (via tsq_flags bits) while the socket was owned by user
 * context.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set;
	 * the cmpxchg loop claims all deferred bits in one shot so a
	 * concurrent setter is never lost.
	 */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* The handlers below pair with a sock_hold() taken when the work
	 * was deferred, hence the __sock_put() after each one.
	 */
	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << MPTCP_PATH_MANAGER)) {
		if (tcp_sk(sk)->mpcb->pm_ops->release_sock)
			tcp_sk(sk)->mpcb->pm_ops->release_sock(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << MPTCP_SUB_DEFERRED))
		mptcp_tsq_sub_deferred(sk);
}
EXPORT_SYMBOL(tcp_release_cb);
/* Boot-time setup: give every possible CPU its own TSQ tasklet and
 * an empty socket queue.
 */
void __init tcp_tasklet_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, cpu);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet, tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}
/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Queue the socket exactly once: only when it was throttled and
	 * is not already on a tasklet queue.
	 */
	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on socket.
		 * This last ref will be released in tcp_tasklet_func()
		 * (subtracting truesize - 1 instead of truesize leaves
		 * one wmem_alloc unit held as that reference).
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 *
 * Returns 0 or a negative errno on success paths; positive
 * queue_xmit() results are folded through net_xmit_eval().
 */
int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
		     gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		/* fclone companion lives directly after the skb. */
		const struct sk_buff *fclone = skb + 1;

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb))) {
			struct sk_buff *newskb;

			/* For an MPTCP data segment, temporarily push the
			 * DSS/ACK/SEQ option area back into the linear part
			 * so pskb_copy() duplicates it, then pull it out of
			 * both original and copy again.
			 */
			if (mptcp_is_data_seq(skb))
				skb_push(skb, MPTCP_SUB_LEN_DSS_ALIGN +
					      MPTCP_SUB_LEN_ACK_ALIGN +
					      MPTCP_SUB_LEN_SEQ_ALIGN);

			newskb = pskb_copy(skb, gfp_mask);

			if (mptcp_is_data_seq(skb)) {
				skb_pull(skb, MPTCP_SUB_LEN_DSS_ALIGN +
					      MPTCP_SUB_LEN_ACK_ALIGN +
					      MPTCP_SUB_LEN_SEQ_ALIGN);
				if (newskb)
					skb_pull(newskb, MPTCP_SUB_LEN_DSS_ALIGN +
						 MPTCP_SUB_LEN_ACK_ALIGN +
						 MPTCP_SUB_LEN_SEQ_ALIGN);
			}
			skb = newskb;
		} else {
			skb = skb_clone(skb, gfp_mask);
		}
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	/* Option sets differ between SYN and established segments. */
	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	/* Re-own the skb: detach it from its old owner and charge it to
	 * this socket, with TSQ accounting when enabled.
	 */
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
			  tcp_wfree : sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	//printf("[transmit:]%5u, %5u\n", ntohs(inet->inet_sport), ntohs(inet->inet_dport));
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	/* data offset (header words) and flags share the 13th/14th byte. */
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			/* Urgent point is beyond 16-bit reach: saturate. */
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	/*
	if(sk->__sk_common.lane_info == 0)
		printf("[transmit_skb]lane:%d, is_path:%d, %d, %d\n", sk->__sk_common.lane_info, sk->__sk_common.is_path, sk->__sk_common.skc_daddr, sk->__sk_common.skc_rcv_saddr);
	*/
	/* NOTE(review): non-upstream experiment below.  lane_info/is_path
	 * force snd_cwnd to 0 (which stalls this subflow entirely) or to 2.
	 * Confirm this override is still wanted; it bypasses congestion
	 * control on every transmitted segment.
	 */
	if(sk->__sk_common.is_path == 1){
		//printf("[transmit_skb]lane:%d, is_path:%d, %d, %d\n", sk->__sk_common.lane_info, sk->__sk_common.is_path, sk->__sk_common.skc_daddr, sk->__sk_common.skc_rcv_saddr);
		if(sk->__sk_common.lane_info == 0){
			tcp_sk(sk)->snd_cwnd = 0;
		}
		//printf("hit!:%d\n", tcp_sk(sk)->snd_cwnd);
	}
	else{
		if(sk->__sk_common.lane_info == 1){
			tcp_sk(sk)->snd_cwnd = 2;
		}
	}

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	/* Local congestion (e.g. qdisc drop): enter CWR. */
	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}
/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* The queued skb's end defines the next sequence to be written. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);

	/* Charge the buffer against the socket's send-memory budget. */
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}
/* Initialize TSO segments for a packet. */
void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
if (skb->len <= mss_now || (is_meta_sk(sk) && !mptcp_sk_can_gso(sk)) ||
(!is_meta_sk(sk) && !sk_can_gso(sk)) || skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
skb_shinfo(skb)->gso_segs = 1;
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_type = 0;
} else {
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
skb_shinfo(skb)->gso_size = mss_now;
skb_shinfo(skb)->gso_type = sk->sk_gso_type;
}
}
/* When a modification to fackets out becomes necessary, we need to check
 * skb is counted to fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Reno (or nothing SACKed) means no FACK bookkeeping to fix. */
	if (tcp_is_reno(tp) || !tp->sacked_out)
		return;

	/* Only skbs below the highest SACKed sequence are counted. */
	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}
/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters.
 *
 * @decr is subtracted from every in-flight counter the skb contributes to
 * (packets_out, and sacked/retrans/lost depending on its sacked flags).
 */
void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	/* Keep the lost-marking hint consistent with the new counts. */
	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	/* Sanity-check the left-out accounting after the adjustment. */
	tcp_verify_left_out(tp);
}
/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 *
 * Returns 0 on success, or a negative errno.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	/* NOTE(review): the result of mptcp_fragment() is ignored and
	 * execution falls through to the regular fragmentation below —
	 * confirm that processing the skb twice is intended (a 'return'
	 * here looks plausible).
	 */
	if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb))
		mptcp_fragment(sk, skb, len, mss_now, 0);

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	/* Bytes of linear data that move to the new segment. */
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	/* A shared nonlinear skb must get a private copy of its head
	 * before we rewrite it.
	 */
	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	/* Paged bytes that move over; shift their truesize accounting. */
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);
		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses when of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data not copied, but
 * immediately discarded.
 *
 * Removes @len bytes from the front of @skb, first from the linear
 * head, then by dropping/adjusting page fragments.
 */
void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	/* First consume from the linear header area. */
	eat = min_t(int, len, skb_headlen(skb));
	if (eat) {
		__skb_pull(skb, eat);
		len -= eat;
		if (!len)
			return;
	}
	/* Then eat into the page fragments, compacting the frag array
	 * (k tracks the write index for surviving frags).
	 */
	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			/* Whole frag consumed: drop its page reference. */
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				/* Partially consumed frag: shift its start. */
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	/* All remaining bytes live in frags: len equals data_len. */
	skb->data_len -= len;
	skb->len = skb->data_len;
}
/* Remove acked data from a packet in the transmit queue.
 *
 * Returns 0 on success, -ENOMEM if the skb could not be uncloned.
 */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	/* MPTCP data segments carry extra mapping state; let MPTCP trim. */
	if (tcp_sk(sk)->mpc && !is_meta_sk(sk) && mptcp_is_data_seq(skb))
		return mptcp_trim_head(sk, skb, len);

	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	__pskb_trim_head(skb, len);

	/* Advance the start of the segment past the acked bytes. */
	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Return the freed bytes to the socket's memory accounting. */
	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso factor. */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));

#ifdef CONFIG_MPTCP
	/* Some data got acked - we assume that the seq-number reached the dest.
	 * Anyway, our MPTCP-option has been trimmed above - we lost it here.
	 * Only remove the SEQ if the call does not come from a meta retransmit.
	 */
	if (tcp_sk(sk)->mpc && !is_meta_sk(sk))
		TCP_SKB_CB(skb)->mptcp_flags &= ~MPTCPHDR_SEQ;
#endif

	return 0;
}
/* Calculate MSS not accounting any TCP options. */
static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mss;

	/* Base MSS per RFC 1122: MMS_S minus the bare TCP header. */
	mss = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mss -= icsk->icsk_af_ops->net_frag_header_len;
	}

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss > tp->rx_opt.mss_clamp)
		mss = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	return mss < 48 ? 48 : mss;
}
/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	/* Option bytes currently in use beyond the bare TCP header. */
	int opts_len = tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr);

	return __tcp_mtu_to_mss(sk, pmtu) - opts_len;
}
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu = mss +
		  tp->tcp_header_len +
		  icsk->icsk_ext_hdr_len +
		  icsk->icsk_af_ops->net_header_len;

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mtu += icsk->icsk_af_ops->net_frag_header_len;
	}
	return mtu;
}
/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
icsk->icsk_af_ops->net_header_len;
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);
/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
   for TCP options, but includes only bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss;

	/* A shrinking PMTU also shrinks the MTU-probing upper bound. */
	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss = tcp_mtu_to_mss(sk, pmtu);
	mss = tcp_bound_to_half_wnd(tp, mss);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss = min(mss, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss;

	return mss;
}
EXPORT_SYMBOL(tcp_sync_mss);
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;
	unsigned int header_len;
	u32 mss_now = tp->mss_cache;

	/* Re-sync the cache if the path MTU changed under us. */
	if (dst) {
		u32 mtu = dst_mtu(dst);

		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly.
	 */
	if (header_len != tp->tcp_header_len)
		mss_now -= (int)header_len - tp->tcp_header_len;

	return mss_now;
}
/* Congestion window validation. (RFC2861) */
void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out < tp->snd_cwnd) {
		/* Network starves: remember peak usage and, after an
		 * application-limited period of at least one RTO, let the
		 * window decay.
		 */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >=
		    inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	} else {
		/* Network is fed fully: the window is entirely in use. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	}
}
/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
				 unsigned int mss_now, unsigned int max_segs)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Tail check must be done on the meta queue under MPTCP. */
	const struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
	u32 needed, window, max_len;

	if (!tp->mpc)
		window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	else
		/* We need to evaluate the available space in the sending window
		 * at the subflow level. However, the subflow seq has not yet
		 * been set. Nevertheless we know that the caller will set it to
		 * write_seq.
		 */
		window = tcp_wnd_end(tp) - tp->write_seq;
	max_len = mss_now * max_segs;

	/* Fast path: full MSS multiple fits and more data follows. */
	if (likely(max_len <= window && skb != tcp_write_queue_tail(meta_sk)))
		return max_len;

	needed = min(skb->len, window);

	if (max_len <= needed)
		return max_len;

	/* Round down to an MSS boundary to avoid odd-sized segments. */
	return needed - needed % mss_now;
}
/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
			   const struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final
	 * (plain or MPTCP data-) FIN.
	 */
	if (skb && tcp_skb_pcount(skb) == 1 &&
	    ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
	     mptcp_is_data_fin(skb)))
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	return in_flight < cwnd ? cwnd - in_flight : 0;
}
/* Initialize TSO state of a skb.
* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
}
return tso_segs;
}
/* Minshall's variant of the Nagle send check. */
static inline bool tcp_minshall_check(const struct tcp_sock *tp)
{
return after(tp->snd_sml, tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
}
/* Return false, if packet can be sent now without violation Nagle's rules:
* 1. It is full sized.
* 2. Or it contains FIN. (already checked by caller)
* 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
* 4. Or TCP_CORK is not set, and all sent packets are ACKed.
* With Minshall's modification: all sent small packets are ACKed.
*/
static inline bool tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
unsigned int mss_now, int nonagle)
{
return skb->len < mss_now &&
((nonagle & TCP_NAGLE_CORK) ||
(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
/* Return true if the Nagle test allows this packet to be
 * sent now.
 */
bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
		    unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return true;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tcp_urg_mode(tp))
		return true;
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb))
		return true;

	return !tcp_nagle_check(tp, skb, cur_mss, nonagle);
}
/* Does at least the first segment of SKB fit into the send window? */
/* Does at least the first segment of SKB fit into the send window?
 * Only the first cur_mss bytes need to fit when the skb is larger.
 */
bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
		      unsigned int cur_mss)
{
	u32 seq_limit = (skb->len > cur_mss) ?
			TCP_SKB_CB(skb)->seq + cur_mss :
			TCP_SKB_CB(skb)->end_seq;

	return !after(seq_limit, tcp_wnd_end(tp));
}
/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
* should be put on the wire right now. If so, it returns the number of
* packets allowed by the congestion window.
*/
/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  Returns the number of packets
 * allowed by the congestion window, or 0 if sending must wait.
 */
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int quota = 0;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (tcp_nagle_test(tp, skb, cur_mss, nonagle)) {
		quota = tcp_cwnd_test(tp, skb);
		/* cwnd allows it, but the receive window may not. */
		if (quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
			quota = 0;
	}
	return quota;
}
/* Test if sending is allowed right now. */
bool tcp_may_send_now(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
return skb &&
tcp_snd_test(sk, skb, tcp_current_mss(sk),
(tcp_skb_is_last(sk, skb) ?
tp->nonagle : TCP_NAGLE_PUSH));
}
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
* which is put after SKB on the list. It is very much like
* tcp_fragment() except that it may make several kinds of assumptions
* in order to speed up the splitting operation. In particular, we
* know that all the data is in scatter-gather pages, and that the
* packet has never been sent out before (and thus is not cloned).
*/
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* MPTCP data segments carry a DSS mapping and must be split by the
	 * MPTCP-aware helper.  BUG FIX: the original called mptso_fragment()
	 * but ignored its result and fell through, splitting the skb a
	 * second time via the generic path below.  Return its result.
	 */
	if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb))
		return mptso_fragment(sk, skb, len, mss_now, gfp, 0);

	/* All of a TSO frame must be composed of paged data. */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, gfp);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	/* Charge the new skb to the socket's write-queue accounting. */
	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
/* Try to defer sending, if possible, in order to minimize the amount
* of TSO splitting we do. View it as a kind of TSO Nagle test.
*
* This algorithm is from John Heffner.
*/
bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Deferral state lives on the MPTCP meta-socket when applicable. */
	struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
	struct tcp_sock *meta_tp = tcp_sk(meta_sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;
	int win_divisor;

	/* Never defer the final FIN (or MPTCP DATA_FIN). */
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb))
		goto send_now;

	/* Only defer in the Open congestion state. */
	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks.  tso_deferred stores
	 * (jiffies << 1) | 1, so both sides are shifted to compare the
	 * 31-bit timestamp portion.
	 */
	if (meta_tp->tso_deferred &&
	    (((u32)jiffies << 1) >> 1) - (meta_tp->tso_deferred >> 1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	/* Caller guarantees a multi-segment skb and cwnd headroom. */
	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));

	if (!tp->mpc)
		send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	else
		/* We need to evaluate the available space in the sending window
		 * at the subflow level. However, the subflow seq has not yet
		 * been set. Nevertheless we know that the caller will set it to
		 * write_seq.
		 */
		send_win = tcp_wnd_end(tp) - tp->write_seq;

	/* From in_flight test above, we know that cwnd > in_flight. */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
			   sk->sk_gso_max_segs * tp->mss_cache))
		goto send_now;

	/* Middle in queue won't get any more data, full sendable already? */
	if ((skb != tcp_write_queue_tail(meta_sk)) && (limit >= skb->len))
		goto send_now;

	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
	if (win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK. Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer.
	 * Do not rearm the timer if already set to not break TCP ACK clocking.
	 */
	if (!meta_tp->tso_deferred)
		meta_tp->tso_deferred = 1 | (jiffies << 1);

	return true;

send_now:
	/* Clear the deferral timestamp so the next attempt starts fresh. */
	meta_tp->tso_deferred = 0;
	return false;
}
/* Create a new MTU probe if we are ready.
* MTU probe is regularly attempting to increase the path MTU by
* deliberately sending larger packets. This discovers routing
* changes resulting in larger path MTUs.
*
* Returns 0 if we should wait to probe (no cwnd available),
* 1 if a probe was sent,
* -1 otherwise
*/
int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	int size_needed;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off) */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk);
	probe_size = 2 * tp->mss_cache;
	/* Need enough extra data queued behind the probe so a lost probe
	 * can still be detected via fast retransmit.
	 */
	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	if (tp->write_seq - tp->snd_nxt < size_needed)
		return -1;

	if (tp->snd_wnd < size_needed)
		return -1;
	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
		return 0;

	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe. Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk->sk_wmem_queued += nskb->truesize;
	sk_mem_charge(sk, nskb->truesize);

	skb = tcp_send_head(sk);

	/* The probe covers [seq, seq + probe_size) of the existing queue. */
	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	tcp_insert_write_queue_before(nskb, skb, sk);

	/* Absorb probe_size bytes from the following skbs into nskb,
	 * consuming whole skbs where possible and trimming the last one.
	 */
	len = 0;
	tcp_for_write_queue_from_safe(skb, next, sk) {
		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
							    skb_put(nskb, copy),
							    copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
						       ~(TCPHDR_FIN|TCPHDR_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data,
								 skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;

		if (len >= probe_size)
			break;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send. If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets. */
		tp->snd_cwnd--;
		tcp_event_new_data_sent(sk, nskb);

		/* Remember the probe so the ACK path can verify it. */
		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}
/* This routine writes packets to the network. It advances the
* send_head. This happens as incoming acks open up the remote
* window for us.
*
* LARGESEND note: !tcp_urg_mode is overkill, only frames between
* snd_up-64k-mss .. snd_up cannot be large. However, taking into
* account rare use of URG, this is not a big flaw.
*
* Send at most one packet when push_one > 0. Temporarily ignore
* cwnd limit to force at most one packet out when push_one == 2.
* Returns true, if no segments are in flight and we have queued segments,
* but cannot send anything now because of SWS or another problem.
*/
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	/* The MPTCP meta-socket has its own xmit path. */
	if (is_meta_sk(sk))
		return mptcp_write_xmit(sk, mss_now, nonagle, push_one, gfp);

	sent_pkts = 0;

	if (!push_one) {
		/* Do MTU probing. */
		result = tcp_mtu_probe(sk);
		if (!result) {
			/* Probe wants to wait for cwnd to drain. */
			return false;
		} else if (result > 0) {
			sent_pkts = 1;
		}
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
			goto repair; /* Skip network transmission */

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota) {
			if (push_one == 2)
				/* Force out a loss probe pkt. */
				cwnd_quota = 1;
			else
				break;
		}

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			/* Single segment: apply Nagle; the last skb in the
			 * queue uses the caller's nonagle mode, others push.
			 */
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (!push_one && tcp_tso_should_defer(sk, skb))
				break;
		}

		/* TSQ : sk_wmem_alloc accounts skb truesize,
		 * including skb overhead. But thats OK.
		 */
		if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
			break;
		}

		limit = mss_now;
		if (tso_segs > 1 && !tcp_urg_mode(tp))
			limit = tcp_mss_split_point(sk, skb, mss_now,
						    min_t(unsigned int,
							  cwnd_quota,
							  sk->sk_gso_max_segs));

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
			break;

repair:
		/* Advance the send_head. This one is sent out.
		 * This call will increment packets_out.
		 */
		tcp_event_new_data_sent(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts += tcp_skb_pcount(skb);

		if (push_one)
			break;
	}

	if (likely(sent_pkts)) {
		if (tcp_in_cwnd_reduction(sk))
			tp->prr_out += sent_pkts;

		/* Send one loss probe per tail loss episode. */
		if (push_one != 2)
			tcp_schedule_loss_probe(sk);
		tcp_cwnd_validate(sk);
		return false;
	}
	/* Nothing sent: report "stalled" so the caller can arm the probe
	 * timer, unless this was a forced loss-probe attempt.
	 */
	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
}
/* Arm the tail-loss-probe (TLP) timer if the connection qualifies.
 * Returns true when a loss probe was scheduled in place of (or before)
 * the pending RTO.
 */
bool tcp_schedule_loss_probe(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, tlp_time_stamp, rto_time_stamp;
	/* srtt is stored left-shifted by 3; recover the rtt estimate. */
	u32 rtt = tp->srtt >> 3;

	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
		return false;
	/* No consecutive loss probes. */
	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
		tcp_rearm_rto(sk);
		return false;
	}
	/* Don't do any loss probe on a Fast Open connection before 3WHS
	 * finishes.
	 */
	if (sk->sk_state == TCP_SYN_RECV)
		return false;

	/* TLP is only scheduled when next timer event is RTO. */
	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
		return false;

	/* Schedule a loss probe in 2*RTT for SACK capable connections
	 * in Open state, that are either limited by cwnd or application.
	 */
	if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
		return false;

	/* Not cwnd-limited and data still queued: no probe needed. */
	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
	     tcp_send_head(sk))
		return false;

	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
	 * for delayed ack when there's one outstanding packet.
	 */
	timeout = rtt << 1;
	if (tp->packets_out == 1)
		timeout = max_t(u32, timeout,
				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
	timeout = max_t(u32, timeout, msecs_to_jiffies(10));

	/* If RTO is shorter, just schedule TLP in its place. */
	tlp_time_stamp = tcp_time_stamp + timeout;
	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
		s32 delta = rto_time_stamp - tcp_time_stamp;
		if (delta > 0)
			timeout = delta;
	}

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
				  TCP_RTO_MAX);
	return true;
}
/* When probe timeout (PTO) fires, send a new segment if one exists, else
* retransmit the last segment.
*/
void tcp_send_loss_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int pcount;
	int mss = tcp_current_mss(sk);
	int err = -1;

	/* Prefer sending new data as the probe when any is queued
	 * (push_one == 2 forces one packet past the cwnd check).
	 */
	if (tcp_send_head(sk) != NULL) {
		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
		goto rearm_timer;
	}

	/* At most one outstanding TLP retransmission. */
	if (tp->tlp_high_seq)
		goto rearm_timer;

	/* Retransmit last segment. */
	skb = tcp_write_queue_tail(sk);
	if (WARN_ON(!skb))
		goto rearm_timer;

	pcount = tcp_skb_pcount(skb);
	if (WARN_ON(!pcount))
		goto rearm_timer;

	/* Split off just the final segment so only one MSS is probed. */
	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
			goto rearm_timer;
		skb = tcp_write_queue_tail(sk);
	}

	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
		goto rearm_timer;

	/* Probe with zero data doesn't trigger fast recovery. */
	if (skb->len > 0)
		err = __tcp_retransmit_skb(sk, skb);

	/* Record snd_nxt for loss detection. */
	if (likely(!err))
		tp->tlp_high_seq = tp->snd_nxt;

rearm_timer:
	/* Always restore the RTO timer after the probe attempt. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto,
				  TCP_RTO_MAX);

	if (likely(!err))
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPLOSSPROBES);
	return;
}
/* Push out any pending frames which were held back due to
* TCP_CORK or attempt at coalescing tiny packets.
* The socket must be locked by the caller.
*/
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle)
{
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and
* all will be happy.
*/
if (unlikely(sk->sk_state == TCP_CLOSE))
return;
if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
sk_gfp_atomic(sk, GFP_ATOMIC)))
tcp_check_probe_timer(sk);
}
/* Send _single_ skb sitting at the send head. This function requires
* true push pending frames to setup probe timer etc.
*/
/* Send _single_ skb sitting at the send head. This function requires
 * true push pending frames to setup probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct sk_buff *head = tcp_send_head(sk);

	BUG_ON(!head || head->len < mss_now);
	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
}
/* This function returns the amount that we can raise the
* usable window based on the following constraints
*
* 1. The window can never be shrunk once it is offered (RFC 793)
* 2. We limit memory per socket
*
* RFC 1122:
* "the suggested [SWS] avoidance algorithm for the receiver is to keep
* RECV.NEXT + RCV.WIN fixed until:
* RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
*
* i.e. don't raise the right edge of the window until you can raise
* it at least MSS bytes.
*
* Unfortunately, the recommended algorithm breaks header prediction,
* since header prediction assumes th->window stays fixed.
*
* Strictly speaking, keeping th->window fixed violates the receiver
* side SWS prevention criteria. The problem is that under this rule
* a stream of single byte packets will cause the right side of the
* window to always advance by a single byte.
*
* Of course, if the sender implements sender side SWS prevention
* then this will not be a problem.
*
* BSD seems to make the following compromise:
*
* If the free space is less than the 1/4 of the maximum
* space available and the free space is less than 1/2 mss,
* then set the window to 0.
* [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
* Otherwise, just prevent the window from shrinking
* and from being larger than the largest representable value.
*
* This prevents incremental opening of the window in the regime
* where TCP is limited by the speed of the reader side taking
* data out of the TCP receive queue. It does nothing about
* those cases where the window is constrained on the sender side
* because the pipeline is full.
*
* BSD also seems to "accidentally" limit itself to windows that are a
* multiple of MSS, at least until the free space gets quite small.
* This would appear to be a side effect of the mbuf implementation.
* Combining these two algorithms results in the observed behavior
* of having a fixed window size at almost all times.
*
* Below we obtain similar behavior by forcing the offered window to
* a multiple of the mss when it is feasible to do so.
*
* Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
* Regular options like TIMESTAMP are taken into account.
*/
u32 __tcp_select_window(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	/* MSS for the peer's data. Previous versions used mss_clamp
	 * here. I don't know if the value based on our guesses
	 * of peer's MSS is better for the performance. It's more correct
	 * but may be worse for the performance because of rcv_mss
	 * fluctuations. --SAW 1998/11/1
	 */
	int mss = icsk->icsk_ack.rcv_mss;
	int free_space = tcp_space(sk);
	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
	int window;

	/* MPTCP uses a shared receive window across subflows. */
	if (tp->mpc)
		return __mptcp_select_window(sk);

	if (mss > full_space)
		mss = full_space;

	/* Less than half the buffer free: stop quick acks and, under
	 * memory pressure, shrink rcv_ssthresh; announce a zero window
	 * if we cannot fit even one MSS (receiver-side SWS avoidance).
	 */
	if (free_space < (full_space >> 1)) {
		icsk->icsk_ack.quick = 0;

		if (sk_under_memory_pressure(sk))
			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
					       4U * tp->advmss);

		if (free_space < mss)
			return 0;
	}

	if (free_space > tp->rcv_ssthresh)
		free_space = tp->rcv_ssthresh;

	/* Don't do rounding if we are using window scaling, since the
	 * scaled window will not line up with the MSS boundary anyway.
	 */
	window = tp->rcv_wnd;
	if (tp->rx_opt.rcv_wscale) {
		window = free_space;

		/* Advertise enough space so that it won't get scaled away.
		 * Import case: prevent zero window announcement if
		 * 1<<rcv_wscale > mss.
		 */
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
	} else {
		/* Get the largest window that is a nice multiple of mss.
		 * Window clamp already applied above.
		 * If our current window offering is within 1 mss of the
		 * free space we just keep it. This prevents the divide
		 * and multiply from happening most of the time.
		 * We also don't do any window rounding when the free space
		 * is too small.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space / mss) * mss;
		else if (mss == full_space &&
			 free_space > window + (full_space >> 1))
			window = free_space;
	}

	return window;
}
/* Collapses two adjacent SKB's during retransmission. */
/* Collapses two adjacent SKB's during retransmission: merges the data of
 * skb's successor into skb, then unlinks and frees the successor.
 */
static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
	int skb_size, next_skb_size;

	skb_size = skb->len;
	next_skb_size = next_skb->len;

	/* Caller (tcp_can_collapse) guarantees single-segment skbs. */
	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);

	tcp_highest_sack_combine(sk, next_skb, skb);

	tcp_unlink_write_queue(next_skb, sk);

	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
				  next_skb_size);

	/* Merge checksum state: partial wins; otherwise combine csums. */
	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_PARTIAL;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

	/* Update sequence range on original skb. */
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

	/* Merge over control information. This moves PSH/FIN etc. over */
	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;

	/* All done, get rid of second SKB and account for it so
	 * packet counting does not break.
	 */
	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;

	/* changed transmit queue under us so clear hints */
	tcp_clear_retrans_hints_partial(tp);
	if (next_skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = skb;

	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));

	sk_wmem_free_skb(sk, next_skb);
}
/* Check if coalescing SKBs is legal. */
/* Check if coalescing SKBs is legal: only single-segment, linear,
 * unshared, already-sent skbs that have not been SACKed may be merged.
 */
static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_pcount(skb) > 1 ||
	    /* TODO: SACK collapsing could be used to remove this condition */
	    skb_shinfo(skb)->nr_frags != 0 ||
	    skb_cloned(skb) ||
	    skb == tcp_send_head(sk) ||
	    /* Some heurestics for collapsing over SACK'd could be invented */
	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
		return false;

	return true;
}
/* Collapse packets in the retransmit queue to make to create
* less packets on the wire. This is only done on retransmission.
*/
/* Collapse packets in the retransmit queue to make to create
 * less packets on the wire. This is only done on retransmission.
 * 'space' bounds how many bytes of following skbs may be merged into 'to'.
 */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
				     int space)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = to, *tmp;
	bool first = true;

	if (!sysctl_tcp_retrans_collapse)
		return;
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
		return;
	/* Currently not supported for MPTCP - but it should be possible */
	if (tp->mpc)
		return;

	tcp_for_write_queue_from_safe(skb, tmp, sk) {
		if (!tcp_can_collapse(sk, skb))
			break;

		space -= skb->len;

		/* First iteration visits 'to' itself: only account its
		 * length, then start merging from its successor.
		 */
		if (first) {
			first = false;
			continue;
		}

		if (space < 0)
			break;
		/* Punt if not enough space exists in the first SKB for
		 * the data in the second
		 */
		if (skb->len > skb_availroom(to))
			break;

		/* Never merge data that lies beyond the send window. */
		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
			break;

		tcp_collapse_retrans(sk, to);
	}
}
/* This retransmits one SKB. Policy decisions and retransmit queue
* state updates are done by the caller. Returns non-zero if an
* error occurred which prevented the send.
*/
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int cur_mss;

	/* Inconslusive MTU probe */
	if (icsk->icsk_mtup.probe_size) {
		icsk->icsk_mtup.probe_size = 0;
	}

	/* Do not sent more than we queued. 1/4 is reserved for possible
	 * copying overhead: fragmentation, tunneling, mangling etc.
	 */
	if (atomic_read(&sk->sk_wmem_alloc) >
	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
		return -EAGAIN;

	/* Part of the skb may already be acked; trim that head off. */
	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	cur_mss = tcp_current_mss(sk);

	/* If receiver has shrunk his window, and skb is out of
	 * new window, do not retransmit it. The exception is the
	 * case, when window is shrunk to zero. In this case
	 * our retransmit serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
	    TCP_SKB_CB(skb)->seq != tp->snd_una)
		return -EAGAIN;

	if (skb->len > cur_mss) {
		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
			return -ENOMEM; /* We'll try again later. */
	} else {
		int oldpcount = tcp_skb_pcount(skb);

		/* MSS may have changed; re-derive the segment count and
		 * fix packets_out accounting for the difference.
		 */
		if (unlikely(oldpcount > 1)) {
			tcp_init_tso_segs(sk, skb, cur_mss);
			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
		}
	}

	tcp_retrans_try_collapse(sk, skb, cur_mss);

	/* Some Solaris stacks overoptimize and ignore the FIN on a
	 * retransmit when old data is attached. So strip it off
	 * since it is cheap to do so and saves bytes on the network.
	 */
	if (skb->len > 0 &&
	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
		if (!pskb_trim(skb, 0)) {
			/* Reuse, even though it does some unnecessary work */
			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
					     TCP_SKB_CB(skb)->tcp_flags);
			skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Make a copy, if the first transmission SKB clone we made
	 * is still in somebody's hands, else make a clone.
	 */
	TCP_SKB_CB(skb)->when = tcp_time_stamp;

	/* make sure skb->data is aligned on arches that require it
	 * and check if ack-trimming & collapsing extended the headroom
	 * beyond what csum_start can cover.
	 */
	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
		     skb_headroom(skb) >= 0xFFFF)) {
		struct sk_buff *nskb;

		/* Temporarily re-expose the MPTCP DSS option space so the
		 * copy includes it, then pull it back on both skbs.
		 */
		if (mptcp_is_data_seq(skb))
			skb_push(skb, MPTCP_SUB_LEN_DSS_ALIGN +
				      MPTCP_SUB_LEN_ACK_ALIGN +
				      MPTCP_SUB_LEN_SEQ_ALIGN);

		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);

		if (mptcp_is_data_seq(skb)) {
			skb_pull(skb, MPTCP_SUB_LEN_DSS_ALIGN +
				      MPTCP_SUB_LEN_ACK_ALIGN +
				      MPTCP_SUB_LEN_SEQ_ALIGN);
			if (nskb)
				skb_pull(nskb, MPTCP_SUB_LEN_DSS_ALIGN +
					       MPTCP_SUB_LEN_ACK_ALIGN +
					       MPTCP_SUB_LEN_SEQ_ALIGN);
		}
		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
			      -ENOBUFS;
	} else {
		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
	}
}
/* Retransmit one skb and, on success, update the retransmission
 * bookkeeping (MIB counters, retrans_out, stamps, loss-detection marks).
 */
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int err = __tcp_retransmit_skb(sk, skb);

	if (err == 0) {
		/* Update global TCP statistics. */
		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);

		tp->total_retrans++;

#if FASTRETRANS_DEBUG > 0
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
			net_dbg_ratelimited("retrans_out leaked\n");
		}
#endif
		if (!tp->retrans_out)
			tp->lost_retrans_low = tp->snd_nxt;
		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
		tp->retrans_out += tcp_skb_pcount(skb);

		/* Save stamp of the first retransmit. */
		if (!tp->retrans_stamp)
			tp->retrans_stamp = TCP_SKB_CB(skb)->when;

		tp->undo_retrans += tcp_skb_pcount(skb);

		/* snd_nxt is stored to detect loss of retransmitted segment,
		 * see tcp_input.c tcp_sacktag_write_queue().
		 */
		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
	} else {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
	}
	return err;
}
/* Check if we forward retransmits are possible in the current
* window/congestion state.
*/
/* Check if we forward retransmits are possible in the current
 * window/congestion state: only during SACK-based Recovery, and only
 * when no new data could be sent instead (rule 3 of RFC3517 NextSeg()).
 */
static bool tcp_can_forward_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Forward retransmissions are possible only during Recovery,
	 * never for Reno (no SACK information), and only when no new
	 * segments can be sent right now.
	 */
	return icsk->icsk_ca_state == TCP_CA_Recovery &&
	       !tcp_is_reno(tp) &&
	       !tcp_may_send_now(sk);
}
/* This gets called after a retransmit timeout, and the initially
* retransmitted data is acknowledged. It tries to continue
* resending the rest of the retransmit queue, until either
* we've sent it all or the congestion window limit is reached.
* If doing SACK, the first ACK which comes back for a timeout
* based retransmit packet might feed us FACK information again.
* If so, we use it to avoid unnecessarily retransmissions.
*/
void tcp_xmit_retransmit_queue(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	struct sk_buff *hole = NULL;	/* first non-lost, non-sacked skb seen */
	u32 last_lost;
	int mib_idx;
	int fwd_rexmitting = 0;

	if (!tp->packets_out)
		return;

	if (!tp->lost_out)
		tp->retransmit_high = tp->snd_una;

	/* Resume from the cached hint when valid, else start at the head. */
	if (tp->retransmit_skb_hint) {
		skb = tp->retransmit_skb_hint;
		last_lost = TCP_SKB_CB(skb)->end_seq;
		if (after(last_lost, tp->retransmit_high))
			last_lost = tp->retransmit_high;
	} else {
		skb = tcp_write_queue_head(sk);
		last_lost = tp->snd_una;
	}

	tcp_for_write_queue_from(skb, sk) {
		__u8 sacked = TCP_SKB_CB(skb)->sacked;

		if (skb == tcp_send_head(sk))
			break;
		/* we could do better than to assign each time */
		if (hole == NULL)
			tp->retransmit_skb_hint = skb;

		/* Assume this retransmit will generate
		 * only one packet for congestion window
		 * calculation purposes.  This works because
		 * tcp_retransmit_skb() will chop up the
		 * packet to be MSS sized and all the
		 * packet counting works out.
		 */
		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
			return;

		if (fwd_rexmitting) {
begin_fwd:
			/* Forward retransmits stop at the highest SACK. */
			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
				break;
			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;

		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
			/* Past the lost region: switch to forward mode. */
			tp->retransmit_high = last_lost;
			if (!tcp_can_forward_retransmit(sk))
				break;
			/* Backtrack if necessary to non-L'ed skb */
			if (hole != NULL) {
				skb = hole;
				hole = NULL;
			}
			fwd_rexmitting = 1;
			goto begin_fwd;

		} else if (!(sacked & TCPCB_LOST)) {
			/* Remember the first candidate hole to backtrack to. */
			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
				hole = skb;
			continue;

		} else {
			last_lost = TCP_SKB_CB(skb)->end_seq;
			if (icsk->icsk_ca_state != TCP_CA_Loss)
				mib_idx = LINUX_MIB_TCPFASTRETRANS;
			else
				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
		}

		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
			continue;

		if (tcp_retransmit_skb(sk, skb))
			return;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);

		if (tcp_in_cwnd_reduction(sk))
			tp->prr_out += tcp_skb_pcount(skb);

		/* Re-arm RTO when the head of the queue is retransmitted. */
		if (skb == tcp_write_queue_head(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  inet_csk(sk)->icsk_rto,
						  TCP_RTO_MAX);
	}
}
/* Send a fin. The caller locks the socket for us. This cannot be
* allowed to fail queueing a FIN frame under any circumstances.
*/
void tcp_send_fin(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_write_queue_tail(sk);
	int mss_now;

	/* Optimization, tack on the FIN if we have a queue of
	 * unsent frames.  But be careful about outgoing SACKS
	 * and IP options.
	 */
	mss_now = tcp_current_mss(sk);

	if (tcp_send_head(sk) != NULL) {
		/* Piggy-back the FIN on the last unsent skb; FIN consumes
		 * one sequence number.
		 */
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
		TCP_SKB_CB(skb)->end_seq++;
		tp->write_seq++;
	} else {
		/* Socket is locked, keep trying until memory is available. */
		for (;;) {
			skb = alloc_skb_fclone(MAX_TCP_HEADER,
					       sk->sk_allocation);
			if (skb)
				break;
			yield();
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(skb, MAX_TCP_HEADER);
		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
		tcp_init_nondata_skb(skb, tp->write_seq,
				     TCPHDR_ACK | TCPHDR_FIN);
		tcp_queue_skb(sk, skb);
	}
	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
}
/* We get here when a process closes a file descriptor (either due to
* an explicit close() or as a byproduct of exit()'ing) and there
* was unread data in the receive queue. This behavior is recommended
* by RFC 2525, section 2.17. -DaveM
*/
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
	struct sk_buff *skb;

	/* MPTCP meta-sockets must reset every subflow. */
	if (is_meta_sk(sk)) {
		mptcp_send_active_reset(sk, priority);
		return;
	}

	/* NOTE: No TCP options attached and we never retransmit this. */
	skb = alloc_skb(MAX_TCP_HEADER, priority);
	if (!skb) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
			     TCPHDR_ACK | TCPHDR_RST);
	/* Send it off. */
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	if (tcp_transmit_skb(sk, skb, 0, priority))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);

	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
}
/* Send a crossed SYN-ACK during socket establishment.
 * WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called. If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */
int tcp_send_synack(struct sock *sk)
{
	struct sk_buff *skb;

	/* Our queued SYN must still sit at the head of the write queue. */
	skb = tcp_write_queue_head(sk);
	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
		pr_debug("%s: wrong queue state\n", __func__);
		return -EFAULT;
	}
	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
		if (skb_cloned(skb)) {
			/* Can't flip flags on a cloned skb; swap in a
			 * private copy on the write queue first. */
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb == NULL)
				return -ENOMEM;
			tcp_unlink_write_queue(skb, sk);
			skb_header_release(nskb);
			__tcp_add_write_queue_head(sk, nskb);
			sk_wmem_free_skb(sk, skb);
			sk->sk_wmem_queued += nskb->truesize;
			sk_mem_charge(sk, nskb->truesize);
			skb = nskb;
		}

		/* Upgrade the queued SYN into a SYN-ACK. */
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
		TCP_ECN_send_synack(tcp_sk(sk), skb);
	}
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	/* clone_it=1 so the queued copy remains available for retransmit. */
	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
/**
 * tcp_make_synack - Prepare a SYN-ACK.
 * sk: listener socket
 * dst: dst entry attached to the SYNACK
 * req: request_sock pointer
 *
 * Allocate one skb and build a SYNACK packet.
 * @dst is consumed : Caller should not use it again.
 */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc)
{
	struct tcp_out_options opts;
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcphdr *th;
	struct sk_buff *skb;
	struct tcp_md5sig_key *md5;
	int tcp_header_size;
	int mss;

	/* NOTE(review): the +15 beyond MAX_TCP_HEADER is presumably
	 * alignment slack -- only MAX_TCP_HEADER is reserved below. */
	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
	if (unlikely(!skb)) {
		dst_release(dst);
		return NULL;
	}
	/* Reserve space for headers. */
	skb_reserve(skb, MAX_TCP_HEADER);

	skb_dst_set(skb, dst);
	security_skb_owned_by(skb, sk);

	/* Advertised MSS: route metric, clamped by any user-set TCP_MAXSEG. */
	mss = dst_metric_advmss(dst);
	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
		mss = tp->rx_opt.user_mss;

	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
		__u8 rcv_wscale;
		/* Set this up on the first call only */
		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);

		/* limit the window selection if the user enforce a smaller rx buffer */
		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
			req->window_clamp = tcp_full_space(sk);

		/* Effective MSS excludes timestamp and MPTCP DSM option
		 * overhead when those are in play. */
		tcp_select_initial_window(tcp_full_space(sk),
			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) -
			(tcp_rsk(req)->saw_mpc ? MPTCP_SUB_LEN_DSM_ALIGN : 0),
			&req->rcv_wnd,
			&req->window_clamp,
			ireq->wscale_ok,
			&rcv_wscale,
			dst_metric(dst, RTAX_INITRWND), sk);
		ireq->rcv_wscale = rcv_wscale;
	}

	memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
	if (unlikely(req->cookie_ts))
		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
	else
#endif
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
					     foc) + sizeof(*th);

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	th = tcp_hdr(skb);
	memset(th, 0, sizeof(struct tcphdr));
	th->syn = 1;
	th->ack = 1;
	TCP_ECN_make_synack(req, th);
	th->source = ireq->loc_port;
	th->dest = ireq->rmt_port;
	/* Setting of flags are superfluous here for callers (and ECE is
	 * not even correctly set)
	 */
	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
			     TCPHDR_SYN | TCPHDR_ACK);

	th->seq = htonl(TCP_SKB_CB(skb)->seq);
	/* XXX data is queued and acked as is. No buffer/window check */
	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);

	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
	th->window = htons(min(req->rcv_wnd, 65535U));
	tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
	th->doff = (tcp_header_size >> 2);
	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));

#ifdef CONFIG_TCP_MD5SIG
	/* Okay, we have all we need - do the md5 hash if needed */
	if (md5) {
		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
					       md5, NULL, req, skb);
	}
#endif

	return skb;
}
EXPORT_SYMBOL(tcp_make_synack);
/* Do all connect socket setups that can be done AF independent. */
void tcp_connect_init(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;

	/* We'll fix this up when we get a response from the other end.
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr) +
		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

#ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/* If user gave his TCP_MAXSEG, record it to clamp */
	if (tp->rx_opt.user_mss)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
	tp->max_window = 0;
	tcp_mtup_init(sk);
	tcp_sync_mss(sk, dst_mtu(dst));

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
	tp->advmss = dst_metric_advmss(dst);
	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
		tp->advmss = tp->rx_opt.user_mss;

	tcp_initialize_rcv_mss(sk);

	/* limit the window selection if the user enforce a smaller rx buffer */
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
		tp->window_clamp = tcp_full_space(sk);

	/* Choose initial receive window and window scale; when timestamps
	 * are in use the option overhead is subtracted from advmss. */
	tcp_select_initial_window(tcp_full_space(sk),
		tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
		&tp->rcv_wnd,
		&tp->window_clamp,
		sysctl_tcp_window_scaling,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND), sk);

	tp->rx_opt.rcv_wscale = rcv_wscale;
	tp->rcv_ssthresh = tp->rcv_wnd;

	/* Reset connection-level send state for the fresh handshake. */
	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_wnd = 0;
	tcp_init_wl(tp, 0);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->snd_up = tp->write_seq;
	tp->snd_nxt = tp->write_seq;

	if (likely(!tp->repair))
		tp->rcv_nxt = 0;
	else
		tp->rcv_tstamp = tcp_time_stamp;
	tp->rcv_wup = tp->rcv_nxt;
	tp->copied_seq = tp->rcv_nxt;

	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	inet_csk(sk)->icsk_retransmits = 0;
	tcp_clear_retrans(tp);

#ifdef CONFIG_MPTCP
	/* MPTCP: master socket requests MPTCP; subflows record their ISN
	 * and the initial receive window. */
	if (sysctl_mptcp_enabled && mptcp_doit(sk)) {
		if (is_master_tp(tp)) {
			tp->request_mptcp = 1;
			mptcp_connect_init(sk);
		} else if (tp->mptcp) {
			tp->mptcp->snt_isn = tp->write_seq;
			tp->mptcp->init_rcv_wnd = tp->rcv_wnd;
		}
	}
#endif
}
/* Append @skb to @sk's write queue during connect and account for it:
 * memory charging, write_seq advance and packets_out bookkeeping. */
static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_skb_cb *cb = TCP_SKB_CB(skb);
	struct tcp_sock *tp = tcp_sk(sk);

	/* The payload extends end_seq by skb->len bytes. */
	cb->end_seq += skb->len;
	skb_header_release(skb);
	__tcp_add_write_queue_tail(sk, skb);

	/* Charge the socket for the buffer's true size. */
	sk_mem_charge(sk, skb->truesize);
	sk->sk_wmem_queued += skb->truesize;

	tp->write_seq = cb->end_seq;
	tp->packets_out += tcp_skb_pcount(skb);
}
/* Build and send a SYN with data and (cached) Fast Open cookie. However,
 * queue a data-only packet after the regular SYN, such that regular SYNs
 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
 * only the SYN sequence, the data are retransmitted in the first ACK.
 * If cookie is not cached or other error occurs, falls back to send a
 * regular SYN with Fast Open cookie request option.
 */
static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_fastopen_request *fo = tp->fastopen_req;
	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
	struct sk_buff *syn_data = NULL, *data;
	unsigned long last_syn_loss = 0;

	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
			       &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: revert to regular handshake temporarily */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		fo->cookie.len = -1;
		goto fallback;
	}

	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
		fo->cookie.len = -1;
	else if (fo->cookie.len <= 0)
		goto fallback;

	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
	 * user-MSS. Reserve maximum option space for middleboxes that add
	 * private TCP options. The cost is reduced data space in SYN :(
	 */
	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
		MAX_TCP_OPTION_SPACE;

	syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
				   sk->sk_allocation);
	if (syn_data == NULL)
		goto fallback;

	/* Copy user data from the iovec into the SYN, at most 'space' bytes. */
	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
		struct iovec *iov = &fo->data->msg_iov[i];
		unsigned char __user *from = iov->iov_base;
		int len = iov->iov_len;

		if (syn_data->len + len > space)
			len = space - syn_data->len;
		else if (i + 1 == iovlen)
			/* No more data pending in inet_wait_for_connect() */
			fo->data = NULL;

		if (skb_add_data(syn_data, from, len))
			goto fallback;
	}

	/* Queue a data-only packet after the regular SYN for retransmission */
	data = pskb_copy(syn_data, sk->sk_allocation);
	if (data == NULL)
		goto fallback;
	TCP_SKB_CB(data)->seq++;
	/* Fix: dropped the old "tcp_flags &= ~TCPHDR_SYN" here -- it was a
	 * dead store, fully overwritten by the assignment below. */
	TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
	tcp_connect_queue_skb(sk, data);
	fo->copied = data->len;

	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
		tp->syn_data = (fo->copied > 0);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
		goto done;
	}
	syn_data = NULL;

fallback:
	/* Send a regular SYN with Fast Open cookie request option */
	if (fo->cookie.len > 0)
		fo->cookie.len = 0;
	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
	if (err)
		tp->syn_fastopen = 0;
	kfree_skb(syn_data);
done:
	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
	return err;
}
/* Build a SYN and send it off.
 * Returns 0 on success, -ENOBUFS if the SYN skb cannot be allocated,
 * or -ECONNREFUSED when transmit reports an immediate refusal.
 * (Cleanup: dropped leftover commented-out debug printf/snd_cwnd lines.)
 */
int tcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int err;

	tcp_connect_init(sk);

	/* Repaired sockets skip the handshake entirely. */
	if (unlikely(tp->repair)) {
		tcp_finish_connect(sk, NULL);
		return 0;
	}

	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(buff == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(buff, MAX_TCP_HEADER);

	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tcp_connect_queue_skb(sk, buff);
	TCP_ECN_send_syn(sk, buff);

	/* Send off SYN; include data in Fast Open. */
	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
	if (err == -ECONNREFUSED)
		return err;

	/* We change tp->snd_nxt after the tcp_transmit_skb() call
	 * in order to make this packet get counted in tcpOutSegs.
	 */
	tp->snd_nxt = tp->write_seq;
	tp->pushed_seq = tp->write_seq;
	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}
EXPORT_SYMBOL(tcp_connect);
/* Send out a delayed ack, the caller does the policy checking
 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ / 2;

		if (icsk->icsk_ack.pingpong ||
		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* Slow path, intersegment interval is "high". */

		/* If some rtt estimate is known, use it to bound delayed ack.
		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
		 * directly.
		 */
		if (tp->srtt) {
			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}
		ato = min(ato, max_ato);
	}

	/* Stay within the limit we were given */
	timeout = jiffies + ato;

	/* Use new timeout only if there wasn't a older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		/* Keep the earlier deadline if it comes first. */
		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	struct sk_buff *buff;

	/* If we have been reset, we may not send again. */
	if (sk->sk_state == TCP_CLOSE)
		return;

	/* We are not putting this on the write queue, so
	 * tcp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
	if (buff == NULL) {
		/* Out of memory: fall back to scheduling a delayed ACK
		 * so the ACK is not lost entirely. */
		inet_csk_schedule_ack(sk);
		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  TCP_DELACK_MAX, TCP_RTO_MAX);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(buff, MAX_TCP_HEADER);
	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

	/* Send it off, this clears delayed acks for us. */
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
}
EXPORT_SYMBOL(tcp_send_ack);
/* This routine sends a packet with an out of date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we make while urgent mode?
 * 4.4BSD forces sending single byte of data. We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
 *
 * Current solution: to send TWO zero-length segments in urgent mode:
 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
 * out-of-date with SND.UNA-1 to probe window.
 */
int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
	if (skb == NULL)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	/* Use a previous sequence. This should cause the other
	 * end to send an ack. Don't queue or clone SKB, just
	 * send it.
	 */
	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
void tcp_send_window_probe(struct sock *sk)
{
if (sk->sk_state == TCP_ESTABLISHED) {
tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
tcp_xmit_probe_skb(sk, 0);
}
}
/* Initiate keepalive or window probe from timer. */
int tcp_write_wakeup(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (sk->sk_state == TCP_CLOSE)
		return -1;

	/* MPTCP meta-sockets have their own wakeup path. */
	if (is_meta_sk(sk))
		return mptcp_write_wakeup(sk);

	if ((skb = tcp_send_head(sk)) != NULL &&
	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		/* Unsent data exists and at least its first byte fits in
		 * the window: push (part of) it instead of a bare probe. */
		int err;
		unsigned int mss = tcp_current_mss(sk);
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

		/* We are probing the opening of a window
		 * but the window size is != 0
		 * must have been a result SWS avoidance ( sender )
		 */
		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
		    skb->len > mss) {
			/* Only part of the skb fits: fragment it down. */
			seg_size = min(seg_size, mss);
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
			if (tcp_fragment(sk, skb, seg_size, mss))
				return -1;
		} else if (!tcp_skb_pcount(skb))
			tcp_set_skb_tso_segs(sk, skb, mss);

		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
		TCP_SKB_CB(skb)->when = tcp_time_stamp;
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
		if (!err)
			tcp_event_new_data_sent(sk, skb);
		return err;
	} else {
		/* Nothing sendable: emit zero-length probe(s); an extra one
		 * delivers the urgent pointer when in urgent mode. */
		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
			tcp_xmit_probe_skb(sk, 1);
		return tcp_xmit_probe_skb(sk, 0);
	}
}
/* A window probe timeout has occurred. If window is not closed send
 * a partial packet else a zero probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err;

	err = tcp_write_wakeup(sk);

	if (tp->packets_out || !tcp_send_head(sk)) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

	if (err <= 0) {
		/* Probe was sent (or hard error): back off exponentially,
		 * capped at TCP_RTO_MAX. */
		if (icsk->icsk_backoff < sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	} else {
		/* If packet was not sent due to local congestion,
		 * do not backoff and do not remember icsk_probes_out.
		 * Let local senders to fight for local resources.
		 *
		 * Use accumulated backoff yet.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
	}
}
| ShogoFujii/PS-MPTCP | net/ipv4/tcp_output.c | C | gpl-2.0 | 99,138 |
/* Copyright (C) 2005-2006 Jean-Marc Valin
File: fftwrap.c
Wrapper for various FFTs
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the Xiph.org Foundation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/*#define USE_SMALLFT*/
#define USE_KISS_FFT
#include "misc.h"
#define MAX_FFT_SIZE 2048
#ifdef FIXED_POINT
/* Scale the signal up by the largest power of two that keeps its peak
 * magnitude within 'bound'. Writes the scaled samples to 'out' (which
 * may alias 'in') and returns the shift amount applied. */
static int maximize_range(spx_word16_t *in, spx_word16_t *out, spx_word16_t bound, int len)
{
   spx_word16_t peak = 0;
   int shift = 0;
   int i;

   /* Find the largest absolute sample value. */
   for (i=0;i<len;i++)
   {
      spx_word16_t mag = in[i] < 0 ? -in[i] : in[i];
      if (mag > peak)
         peak = mag;
   }

   /* Count doublings that keep the peak at or below the bound. */
   if (peak != 0)
   {
      while (peak <= (bound>>1))
      {
         peak <<= 1;
         shift++;
      }
   }

   for (i=0;i<len;i++)
      out[i] = in[i] << shift;

   return shift;
}
/* Undo the scaling applied by maximize_range(): shift each sample back
 * down by 'shift' bits with rounding. 'out' may alias 'in'.
 * Fix: the old code always computed 1<<(shift-1), which is undefined
 * behavior for shift==0 (negative shift count); maximize_range() can
 * legitimately return 0. Handle that case as a plain copy. */
static void renorm_range(spx_word16_t *in, spx_word16_t *out, int shift, int len)
{
   int i;
   spx_word16_t round;
   if (shift <= 0)
   {
      /* No scaling was applied; just copy through if needed. */
      if (in != out)
      {
         for (i=0;i<len;i++)
            out[i] = in[i];
      }
      return;
   }
   round = (spx_word16_t)(1<<(shift-1));
   for (i=0;i<len;i++)
   {
      out[i] = (in[i] + round) >> shift;
   }
}
#endif
#ifdef USE_SMALLFT
#include "smallft.h"
#include <math.h>
/* Allocate and initialize a smallft lookup table for transforms of
 * the given size. Returned pointer is freed by spx_fft_destroy(). */
void *spx_fft_init(int size)
{
   struct drft_lookup *lookup;
   lookup = speex_alloc(sizeof(struct drft_lookup));
   spx_drft_init(lookup, size);
   return (void*)lookup;
}
/* Release a lookup table created by spx_fft_init(): clear the drft
 * state first, then free the struct itself. */
void spx_fft_destroy(void *table)
{
   spx_drft_clear(table);
   speex_free(table);
}
/* Forward real FFT (smallft backend). Scales the input by 1/N into
 * 'out' and transforms in place there. In-place use (in==out) is not
 * supported and only triggers a warning. */
void spx_fft(void *table, float *in, float *out)
{
   struct drft_lookup *t = (struct drft_lookup *)table;
   int i;
   float scale = 1./t->n;

   if (in==out)
      speex_warning("FFT should not be done in-place");

   /* Both the in-place and out-of-place cases copy+scale identically. */
   for (i=0;i<t->n;i++)
      out[i] = scale*in[i];
   spx_drft_forward(t, out);
}
/* Inverse real FFT (smallft backend). Copies the spectrum into 'out'
 * and transforms in place there; in-place use only warns. */
void spx_ifft(void *table, float *in, float *out)
{
   struct drft_lookup *t = (struct drft_lookup *)table;

   if (in==out)
   {
      speex_warning("FFT should not be done in-place");
   } else {
      int i;
      for (i=0;i<t->n;i++)
         out[i] = in[i];
   }
   spx_drft_backward(t, out);
}
#elif defined(USE_KISS_FFT)
#include "kiss_fftr.h"
#include "kiss_fft.h"
/* Per-size state for the KISS FFT backend: one real-FFT plan per
 * direction plus a scratch buffer of (N>>1)+1 complex bins. */
struct kiss_config {
   kiss_fftr_cfg forward;   /* real-to-complex plan */
   kiss_fftr_cfg backward;  /* complex-to-real plan */
   kiss_fft_cpx *freq_data; /* scratch frequency bins, (N>>1)+1 entries */
   int N;                   /* transform size */
};
/* Allocate a kiss_config for transforms of the given size: forward and
 * backward real-FFT plans plus the shared frequency-domain scratch. */
void *spx_fft_init(int size)
{
   struct kiss_config *cfg;
   cfg = (struct kiss_config*)speex_alloc(sizeof(struct kiss_config));
   cfg->N = size;
   cfg->freq_data = (kiss_fft_cpx*)speex_alloc(((size>>1)+1)*sizeof(kiss_fft_cpx));
   cfg->forward = kiss_fftr_alloc(size,0,NULL,NULL);
   cfg->backward = kiss_fftr_alloc(size,1,NULL,NULL);
   return cfg;
}
/* Free everything allocated by spx_fft_init(): both plans, the scratch
 * buffer, and the config struct itself. */
void spx_fft_destroy(void *table)
{
   struct kiss_config *cfg = (struct kiss_config *)table;
   kiss_fftr_free(cfg->backward);
   kiss_fftr_free(cfg->forward);
   speex_free(cfg->freq_data);
   speex_free(cfg);
}
#ifdef FIXED_POINT
/* Forward real FFT, fixed-point build. Scales the input up (in place)
 * for precision, transforms, then undoes the scaling on both buffers. */
void spx_fft(void *table, spx_word16_t *in, spx_word16_t *out)
{
   int i;
   int shift;
   struct kiss_config *t = (struct kiss_config *)table;
   shift = maximize_range(in, in, 32000, t->N);
   kiss_fftr(t->forward, in, t->freq_data);
   /* Pack: out[0] = DC real part, then interleaved (re,im) per bin,
    * and the final slot holds the last bin's real part. */
   out[0] = t->freq_data[0].r;
   for (i=1;i<t->N>>1;i++)
   {
      out[(i<<1)-1] = t->freq_data[i].r;
      out[(i<<1)] = t->freq_data[i].i;
   }
   out[(i<<1)-1] = t->freq_data[i].r;
   /* Restore the caller's input buffer and normalize the output.
    * NOTE(review): shift may be 0 here; confirm renorm_range handles
    * a zero shift (it computes 1<<(shift-1)). */
   renorm_range(in, in, shift, t->N);
   renorm_range(out, out, shift, t->N);
}
#else
/* Forward real FFT, float build. Output layout: out[0] is the scaled
 * DC real part, followed by interleaved (re,im) pairs, with the final
 * slot holding the last bin's real part. Everything is scaled by 1/N. */
void spx_fft(void *table, spx_word16_t *in, spx_word16_t *out)
{
   struct kiss_config *cfg = (struct kiss_config *)table;
   const int half = cfg->N>>1;
   float scale;
   int k;

   scale = 1./cfg->N;
   kiss_fftr(cfg->forward, in, cfg->freq_data);

   out[0] = scale*cfg->freq_data[0].r;
   for (k=1;k<half;k++)
   {
      out[2*k-1] = scale*cfg->freq_data[k].r;
      out[2*k]   = scale*cfg->freq_data[k].i;
   }
   out[2*half-1] = scale*cfg->freq_data[half].r;
}
#endif
/* Inverse real FFT (both builds). Unpacks the interleaved spectrum
 * produced by spx_fft() into complex bins (imag parts of the first and
 * last bins are zero) and runs the backward transform. */
void spx_ifft(void *table, spx_word16_t *in, spx_word16_t *out)
{
   struct kiss_config *cfg = (struct kiss_config *)table;
   const int half = cfg->N>>1;
   int k;

   cfg->freq_data[0].r = in[0];
   cfg->freq_data[0].i = 0;
   for (k=1;k<half;k++)
   {
      cfg->freq_data[k].r = in[2*k-1];
      cfg->freq_data[k].i = in[2*k];
   }
   cfg->freq_data[half].r = in[2*half-1];
   cfg->freq_data[half].i = 0;

   kiss_fftri(cfg->backward, cfg->freq_data, out);
}
#else
#error No other FFT implemented
#endif
#ifdef FIXED_POINT
/*#include "smallft.h"*/
/* Float-interface wrapper for the fixed-point forward FFT: rounds the
 * float input to integers, runs spx_fft(), converts the result back.
 * NOTE(review): without VAR_ARRAYS this assumes N <= MAX_FFT_SIZE --
 * confirm callers never exceed it. */
void spx_fft_float(void *table, float *in, float *out)
{
   int i;
#ifdef USE_SMALLFT
   int N = ((struct drft_lookup *)table)->n;
#elif defined(USE_KISS_FFT)
   int N = ((struct kiss_config *)table)->N;
#else
#endif
#ifdef VAR_ARRAYS
   spx_word16_t _in[N];
   spx_word16_t _out[N];
#else
   spx_word16_t _in[MAX_FFT_SIZE];
   spx_word16_t _out[MAX_FFT_SIZE];
#endif
   /* Round-to-nearest conversion into the fixed-point buffer. */
   for (i=0;i<N;i++)
      _in[i] = (int)floor(.5+in[i]);
   spx_fft(table, _in, _out);
   for (i=0;i<N;i++)
      out[i] = _out[i];
/* Dead reference implementation kept for comparison/debugging. */
#if 0
   if (!fixed_point)
   {
      float scale;
      struct drft_lookup t;
      spx_drft_init(&t, ((struct kiss_config *)table)->N);
      scale = 1./((struct kiss_config *)table)->N;
      for (i=0;i<((struct kiss_config *)table)->N;i++)
         out[i] = scale*in[i];
      spx_drft_forward(&t, out);
      spx_drft_clear(&t);
   }
#endif
}
/* Float-interface wrapper for the fixed-point inverse FFT: rounds the
 * float spectrum to integers, runs spx_ifft(), converts back.
 * NOTE(review): without VAR_ARRAYS this assumes N <= MAX_FFT_SIZE --
 * confirm callers never exceed it. */
void spx_ifft_float(void *table, float *in, float *out)
{
   int i;
#ifdef USE_SMALLFT
   int N = ((struct drft_lookup *)table)->n;
#elif defined(USE_KISS_FFT)
   int N = ((struct kiss_config *)table)->N;
#else
#endif
#ifdef VAR_ARRAYS
   spx_word16_t _in[N];
   spx_word16_t _out[N];
#else
   spx_word16_t _in[MAX_FFT_SIZE];
   spx_word16_t _out[MAX_FFT_SIZE];
#endif
   /* Round-to-nearest conversion into the fixed-point buffer. */
   for (i=0;i<N;i++)
      _in[i] = (int)floor(.5+in[i]);
   spx_ifft(table, _in, _out);
   for (i=0;i<N;i++)
      out[i] = _out[i];
/* Dead reference implementation kept for comparison/debugging. */
#if 0
   if (!fixed_point)
   {
      int i;
      struct drft_lookup t;
      spx_drft_init(&t, ((struct kiss_config *)table)->N);
      for (i=0;i<((struct kiss_config *)table)->N;i++)
         out[i] = in[i];
      spx_drft_backward(&t, out);
      spx_drft_clear(&t);
   }
#endif
}
#else
/* Float build: spx_word16_t is float, so this is a direct pass-through. */
void spx_fft_float(void *table, float *in, float *out)
{
   spx_fft(table, in, out);
}
/* Float build: spx_word16_t is float, so this is a direct pass-through. */
void spx_ifft_float(void *table, float *in, float *out)
{
   spx_ifft(table, in, out);
}
#endif
| gabrieldelsaint/uol-messenger | src/libuolfone/wengophone-ng/current/wifo/phapi/speex/libspeex/fftwrap.c | C | gpl-2.0 | 7,664 |
/*
* IPv6 Address [auto]configuration
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Changes:
*
* Janos Farkas : delete timer on ifdown
* <chexum@bankinf.banki.hu>
* Andi Kleen : kill double kfree on module
* unload.
* Maciej W. Rozycki : FDDI support
* sekiya@USAGI : Don't send too many RS
* packets.
* yoshfuji@USAGI : Fixed interval between DAD
* packets.
* YOSHIFUJI Hideaki @USAGI : improved accuracy of
* address validation timer.
* YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
* support.
* Yuji SEKIYA @USAGI : Don't assign a same IPv6
* address on a same interface.
* YOSHIFUJI Hideaki @USAGI : ARCnet support
* YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
* seq_file.
* YOSHIFUJI Hideaki @USAGI : improved source address
* selection; consider scope,
* status etc.
* Harout S. Hedeshian : procfs flag to toggle automatic
* addition of prefix route
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/if_arcnet.h>
#include <linux/if_infiniband.h>
#include <linux/route.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/string.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/if_tunnel.h>
#include <linux/rtnetlink.h>
#ifdef CONFIG_IPV6_PRIVACY
#include <linux/random.h>
#endif
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
/* Set to 3 to get tracing... */
//
//#define ACONF_DEBUG 2 // The original value.
#define ACONF_DEBUG 2 // To debug...
// LGE_CHANGE_E, [LGE_DATA][LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER], heeyeon.nah@lge.com, 2013-05-21
#if ACONF_DEBUG >= 3
#define ADBG(x) printk x
#else
#define ADBG(x)
#endif
#define INFINITY_LIFE_TIME 0xFFFFFFFF
//
//The value of global scope is 1.
//The value of link-local scope is 33.
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
#define LGE_DATA_GLOBAL_SCOPE 1
#define LGE_DATA_LINK_LOCAL_SCOPE 33
//The value which is 100 equals 1 second.
//So value which is 5 equals 50 milli-seconds.
//The 50 milli-seconds is requirements of LGU+.
#define LGE_DATA_WAITING_TIME_FOR_DAD_OF_LGU 5
#endif
//
/* Convert a jiffies creation stamp into hundredths of a second since
 * boot (INITIAL_JIFFIES-relative), as exposed via netlink/proc. */
static inline u32 cstamp_delta(unsigned long cstamp)
{
	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}
#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
#define ADDRCONF_TIMER_FUZZ (HZ / 4)
#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
#ifdef CONFIG_SYSCTL
static void addrconf_sysctl_register(struct inet6_dev *idev);
static void addrconf_sysctl_unregister(struct inet6_dev *idev);
#else
static inline void addrconf_sysctl_register(struct inet6_dev *idev)
{
}
static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
{
}
#endif
#ifdef CONFIG_IPV6_PRIVACY
static int __ipv6_regen_rndid(struct inet6_dev *idev);
static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static void ipv6_regen_rndid(unsigned long data);
#endif
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
static int ipv6_count_addresses(struct inet6_dev *idev);
/*
* Configured unicast address hash table
*/
static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
static DEFINE_SPINLOCK(addrconf_hash_lock);
static void addrconf_verify(unsigned long);
static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
static DEFINE_SPINLOCK(addrconf_verify_lock);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
static void addrconf_type_change(struct net_device *dev,
unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);
static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
static void addrconf_dad_timer(unsigned long data);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(unsigned long data);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
struct prefix_info *pinfo);
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
struct net_device *dev);
static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
/* Built-in IPv6 per-device configuration defaults (vendor fork adds
 * ra_info_flag, accept_ra_rt_table and accept_ra_prefix_route knobs). */
static struct ipv6_devconf ipv6_devconf __read_mostly = {
	.forwarding = 0,
	.hop_limit = IPV6_DEFAULT_HOPLIMIT,
	.mtu6 = IPV6_MIN_MTU,
	.accept_ra = 1,
	.accept_redirects = 1,
	.autoconf = 1,
	.force_mld_version = 0,
	.dad_transmits = 1,
	.rtr_solicits = MAX_RTR_SOLICITATIONS,
	.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
	.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
	.use_tempaddr = 0,
	.temp_valid_lft = TEMP_VALID_LIFETIME,
	.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
	.regen_max_retry = REGEN_MAX_RETRY,
	.max_desync_factor = MAX_DESYNC_FACTOR,
#endif
	.max_addresses = IPV6_MAX_ADDRESSES,
	.accept_ra_defrtr = 1,
	.accept_ra_pinfo = 1,
#ifdef CONFIG_LGE_DHCPV6_WIFI
	.ra_info_flag = 0,
#endif
#ifdef CONFIG_IPV6_ROUTER_PREF
	.accept_ra_rtr_pref = 1,
	.rtr_probe_interval = 60 * HZ,
#ifdef CONFIG_IPV6_ROUTE_INFO
	.accept_ra_rt_info_max_plen = 0,
#endif
#endif
	.accept_ra_rt_table = 0,
	.proxy_ndp = 0,
	.accept_source_route = 0, /* we do not accept RH0 by default. */
	.disable_ipv6 = 0,
	.accept_dad = 1,
	.accept_ra_prefix_route = 1,
};
/* Template configuration copied into each newly-created inet6_dev
 * (see the memcpy of devconf_dflt in ipv6_add_dev()). */
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
	.forwarding = 0,
	.hop_limit = IPV6_DEFAULT_HOPLIMIT,
	.mtu6 = IPV6_MIN_MTU,
	.accept_ra = 1,
	.accept_redirects = 1,
	.autoconf = 1,
	.dad_transmits = 1,
	.rtr_solicits = MAX_RTR_SOLICITATIONS,
	.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
	.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
	.use_tempaddr = 0,
	.temp_valid_lft = TEMP_VALID_LIFETIME,
	.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
	.regen_max_retry = REGEN_MAX_RETRY,
	.max_desync_factor = MAX_DESYNC_FACTOR,
#endif
	.max_addresses = IPV6_MAX_ADDRESSES,
	.accept_ra_defrtr = 1,
	.accept_ra_pinfo = 1,
#ifdef CONFIG_LGE_DHCPV6_WIFI
	.ra_info_flag = 0,
#endif
#ifdef CONFIG_IPV6_ROUTER_PREF
	.accept_ra_rtr_pref = 1,
	.rtr_probe_interval = 60 * HZ,
#ifdef CONFIG_IPV6_ROUTE_INFO
	.accept_ra_rt_info_max_plen = 0,
#endif
#endif
	.accept_ra_rt_table = 0,
	.proxy_ndp = 0,
	.accept_source_route = 0, /* we do not accept RH0 by default. */
	.disable_ipv6 = 0,
	.accept_dad = 1,
	.accept_ra_prefix_route = 1,
};
/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
/* Check if a valid qdisc is available */
/* Check if a valid qdisc is available */
static inline bool addrconf_qdisc_ok(const struct net_device *dev)
{
	return !qdisc_tx_is_noop(dev);
}
/* Check if a route is valid prefix route */
/* Check if a route is valid prefix route: neither a gateway route nor
 * a default route. */
static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
}
/* Cancel a pending address timer; drop the reference the timer held
 * only if it was actually still pending. */
static void addrconf_del_timer(struct inet6_ifaddr *ifp)
{
	if (del_timer(&ifp->timer))
		__in6_ifa_put(ifp);
}
/* Which handler addrconf_mod_timer() should arm on ifp->timer. */
enum addrconf_timer_t {
	AC_NONE,
	AC_DAD,	/* duplicate address detection */
	AC_RS,	/* router solicitation */
};
/* (Re)arm ifp->timer to fire 'when' jiffies from now with the handler
 * selected by 'what'. Takes a reference on ifp for the timer unless it
 * was already pending (in which case the timer's existing reference is
 * reused). */
static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
			       enum addrconf_timer_t what,
			       unsigned long when)
{
	if (!del_timer(&ifp->timer))
		in6_ifa_hold(ifp);

	switch (what) {
	case AC_DAD:
		ifp->timer.function = addrconf_dad_timer;
		break;
	case AC_RS:
		ifp->timer.function = addrconf_rs_timer;
		break;
	default:
		break;
	}
	ifp->timer.expires = jiffies + when;
	add_timer(&ifp->timer);
}
/*
 * Allocate the per-device IPv6 SNMP counters: the per-cpu ipstats MIB
 * plus the ICMPv6 device and per-message MIBs.  Returns 0 on success;
 * on failure undoes any partial allocation and returns -ENOMEM.
 */
static int snmp6_alloc_dev(struct inet6_dev *idev)
{
	if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
			  sizeof(struct ipstats_mib),
			  __alignof__(struct ipstats_mib)) < 0)
		goto err_ip;
	idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
					GFP_KERNEL);
	if (!idev->stats.icmpv6dev)
		goto err_icmp;
	idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
					   GFP_KERNEL);
	if (!idev->stats.icmpv6msgdev)
		goto err_icmpmsg;
	return 0;

	/* unwind in reverse order of allocation */
err_icmpmsg:
	kfree(idev->stats.icmpv6dev);
err_icmp:
	snmp_mib_free((void __percpu **)idev->stats.ipv6);
err_ip:
	return -ENOMEM;
}
/* Release everything snmp6_alloc_dev() allocated. */
static void snmp6_free_dev(struct inet6_dev *idev)
{
	kfree(idev->stats.icmpv6msgdev);
	kfree(idev->stats.icmpv6dev);
	snmp_mib_free((void __percpu **)idev->stats.ipv6);
}
/* Nobody refers to this device, we may destroy it. */
void in6_dev_finish_destroy(struct inet6_dev *idev)
{
	struct net_device *dev = idev->dev;

	/* by now all addresses and multicast state must be gone */
	WARN_ON(!list_empty(&idev->addr_list));
	WARN_ON(idev->mc_list != NULL);

#ifdef NET_REFCNT_DEBUG
	printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
#endif
	dev_put(dev);
	if (!idev->dead) {
		/* refcount hit zero while still attached -- bug, leak it */
		pr_warning("Freeing alive inet6 device %p\n", idev);
		return;
	}
	snmp6_free_dev(idev);
	/* RCU readers may still traverse dev->ip6_ptr */
	kfree_rcu(idev, rcu);
}
EXPORT_SYMBOL(in6_dev_finish_destroy);
/*
 * Create and attach a fresh inet6_dev for @dev: inherit the netns
 * default configuration, allocate neighbour parms and SNMP stats,
 * register with sysctl/procfs and join the mandatory multicast groups.
 * Returns NULL on any failure.  Caller must hold RTNL.
 */
static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
{
	struct inet6_dev *ndev;

	ASSERT_RTNL();

	if (dev->mtu < IPV6_MIN_MTU)
		return NULL;

	ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);

	if (ndev == NULL)
		return NULL;

	rwlock_init(&ndev->lock);
	ndev->dev = dev;
	INIT_LIST_HEAD(&ndev->addr_list);

	/* start from the per-netns default configuration */
	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
	ndev->cnf.mtu6 = dev->mtu;
	ndev->cnf.sysctl = NULL;
	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
	if (ndev->nd_parms == NULL) {
		kfree(ndev);
		return NULL;
	}
	if (ndev->cnf.forwarding)
		dev_disable_lro(dev);
	/* We refer to the device */
	dev_hold(dev);

	if (snmp6_alloc_dev(ndev) < 0) {
		ADBG((KERN_WARNING
			"%s(): cannot allocate memory for statistics; dev=%s.\n",
			__func__, dev->name));
		neigh_parms_release(&nd_tbl, ndev->nd_parms);
		dev_put(dev);
		kfree(ndev);
		return NULL;
	}

	if (snmp6_register_dev(ndev) < 0) {
		ADBG((KERN_WARNING
			"%s(): cannot create /proc/net/dev_snmp6/%s\n",
			__func__, dev->name));
		neigh_parms_release(&nd_tbl, ndev->nd_parms);
		ndev->dead = 1;
		in6_dev_finish_destroy(ndev);
		return NULL;
	}

	/* One reference from device.  We must do this before
	 * we invoke __ipv6_regen_rndid().
	 */
	in6_dev_hold(ndev);

	/* no ND on these devices, so DAD cannot work */
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		ndev->cnf.accept_dad = -1;

#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
		printk(KERN_INFO
		       "%s: Disabled Multicast RS\n",
		       dev->name);
		ndev->cnf.rtr_solicits = 0;
	}
#endif

#ifdef CONFIG_IPV6_PRIVACY
	INIT_LIST_HEAD(&ndev->tempaddr_list);
	setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
	/* tunnel-like and loopback devices get no privacy addresses */
	if ((dev->flags&IFF_LOOPBACK) ||
	    dev->type == ARPHRD_TUNNEL ||
	    dev->type == ARPHRD_TUNNEL6 ||
	    dev->type == ARPHRD_SIT ||
	    dev->type == ARPHRD_NONE) {
		ndev->cnf.use_tempaddr = -1;
	} else {
		in6_dev_hold(ndev);	/* reference for the regen timer */
		ipv6_regen_rndid((unsigned long) ndev);
	}
#endif

	if (netif_running(dev) && addrconf_qdisc_ok(dev))
		ndev->if_flags |= IF_READY;

	ipv6_mc_init_dev(ndev);
	ndev->tstamp = jiffies;
	addrconf_sysctl_register(ndev);
	/* protected by rtnl_lock */
	rcu_assign_pointer(dev->ip6_ptr, ndev);

	/* Join all-node multicast group */
	ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);

	/* Join all-router multicast group if forwarding is set */
	if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);

	return ndev;
}
/*
 * Look up the inet6_dev of @dev, creating one on demand.  When the
 * device is up, (re)enable its multicast state.  Caller holds RTNL.
 */
static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
{
	struct inet6_dev *idev;

	ASSERT_RTNL();

	idev = __in6_dev_get(dev);
	if (idev == NULL) {
		idev = ipv6_add_dev(dev);
		if (idev == NULL)
			return NULL;
	}

	if (dev->flags & IFF_UP)
		ipv6_mc_up(idev);

	return idev;
}
#ifdef CONFIG_SYSCTL
/*
 * Propagate a change of idev->cnf.forwarding to the device itself:
 * join/leave the all-routers group and adjust each (non-tentative)
 * address's subnet-router anycast membership.
 */
static void dev_forward_change(struct inet6_dev *idev)
{
	struct net_device *dev;
	struct inet6_ifaddr *ifa;

	if (!idev)
		return;
	dev = idev->dev;
	if (idev->cnf.forwarding)
		dev_disable_lro(dev);
	if (dev && (dev->flags & IFF_MULTICAST)) {
		if (idev->cnf.forwarding)
			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
		else
			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
	}
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		if (ifa->flags&IFA_F_TENTATIVE)
			continue;	/* DAD not complete yet */
		if (idev->cnf.forwarding)
			addrconf_join_anycast(ifa);
		else
			addrconf_leave_anycast(ifa);
	}
}
/*
 * Set the forwarding flag on every inet6_dev in @net to @newf and
 * notify each device whose effective value actually changed.
 */
static void addrconf_forward_change(struct net *net, __s32 newf)
{
	struct net_device *dev;
	struct inet6_dev *idev;

	for_each_netdev(net, dev) {
		int changed;

		idev = __in6_dev_get(dev);
		if (!idev)
			continue;
		/* compare as booleans: any nonzero value counts as "on" */
		changed = (!idev->cnf.forwarding) ^ (!newf);
		idev->cnf.forwarding = newf;
		if (changed)
			dev_forward_change(idev);
	}
}
/*
 * Apply a write to a "forwarding" sysctl entry.  @p points at the conf
 * field that was written; whether it is the netns default, the "all"
 * entry or a per-device entry determines how far the change fans out.
 * Returns 1 when the caller must treat this as a change event, 0 for
 * the default-only case, or restarts the syscall if RTNL is contended.
 */
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
{
	struct net *net;
	int old;

	if (!rtnl_trylock())
		return restart_syscall();

	net = (struct net *)table->extra2;
	old = *p;
	*p = newf;

	/* the default entry only affects devices created later */
	if (p == &net->ipv6.devconf_dflt->forwarding) {
		rtnl_unlock();
		return 0;
	}

	if (p == &net->ipv6.devconf_all->forwarding) {
		/* "all" also rewrites the default and every device */
		net->ipv6.devconf_dflt->forwarding = newf;
		addrconf_forward_change(net, newf);
	} else if ((!newf) ^ (!old))
		/* per-device: extra1 caches the inet6_dev */
		dev_forward_change((struct inet6_dev *)table->extra1);
	rtnl_unlock();

	/* routers do not keep learned default routes */
	if (newf)
		rt6_purge_dflt_routers(net);
	return 1;
}
#endif
/* Nobody refers to this ifaddr, destroy it */
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
{
	/* must already be unhashed from the global address table */
	WARN_ON(!hlist_unhashed(&ifp->addr_lst));

#ifdef NET_REFCNT_DEBUG
	printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
#endif

	in6_dev_put(ifp->idev);

	/* a pending timer would hold a reference, so this is a bug */
	if (del_timer(&ifp->timer))
		pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);

	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
		/* refcount reached zero without ipv6_del_addr() -- leak it */
		pr_warning("Freeing alive inet6 address %p\n", ifp);
		return;
	}
	dst_release(&ifp->rt->dst);

	/* RCU readers may still see this entry */
	kfree_rcu(ifp, rcu);
}
/*
 * Insert @ifp into the device address list, which is kept sorted in
 * order of decreasing source scope (global before link-local).
 */
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
{
	int our_scope = ipv6_addr_src_scope(&ifp->addr);
	struct list_head *pos;

	list_for_each(pos, &idev->addr_list) {
		struct inet6_ifaddr *cur =
			list_entry(pos, struct inet6_ifaddr, if_list);

		/* stop at the first entry with equal or smaller scope */
		if (our_scope >= ipv6_addr_src_scope(&cur->addr))
			break;
	}

	/* insert before @pos (or at the tail if the loop ran out) */
	list_add_tail(&ifp->if_list, pos);
}
/*
 * Hash an address into the global inet6_addr_lst table.  Only the low
 * 64 bits are hashed; on links with an IEEE token those bits carry the
 * interface identifier.
 */
static u32 ipv6_addr_hash(const struct in6_addr *addr)
{
	u32 h = jhash_2words((__force u32)addr->s6_addr32[2],
			     (__force u32)addr->s6_addr32[3], 0);

	return h & (IN6_ADDR_HSIZE - 1);
}
/* On success it returns ifp with increased reference count */
static struct inet6_ifaddr *
ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
	      int scope, u32 flags)
{
	struct inet6_ifaddr *ifa = NULL;
	struct rt6_info *rt;
	unsigned int hash;
	int err = 0;
	int addr_type = ipv6_addr_type(addr);

	/* reject unspecified, multicast and (off loopback) loopback */
	if (addr_type == IPV6_ADDR_ANY ||
	    addr_type & IPV6_ADDR_MULTICAST ||
	    (!(idev->dev->flags & IFF_LOOPBACK) &&
	     addr_type & IPV6_ADDR_LOOPBACK))
		return ERR_PTR(-EADDRNOTAVAIL);

	rcu_read_lock_bh();
	if (idev->dead) {
		err = -ENODEV;			/*XXX*/
		goto out2;
	}

	if (idev->cnf.disable_ipv6) {
		err = -EACCES;
		goto out2;
	}

	spin_lock(&addrconf_hash_lock);

	/* Ignore adding duplicate addresses on an interface */
	if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
		ADBG(("ipv6_add_addr: already assigned\n"));
		err = -EEXIST;
		goto out;
	}

	ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);

	if (ifa == NULL) {
		ADBG(("ipv6_add_addr: malloc failed\n"));
		err = -ENOBUFS;
		goto out;
	}

	rt = addrconf_dst_alloc(idev, addr, false);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto out;
	}

	ifa->addr = *addr;

	spin_lock_init(&ifa->lock);
	spin_lock_init(&ifa->state_lock);
	init_timer(&ifa->timer);
	INIT_HLIST_NODE(&ifa->addr_lst);
	ifa->timer.data = (unsigned long) ifa;
	ifa->scope = scope;
	ifa->prefix_len = pfxlen;
	/* new addresses always start tentative until DAD finishes */
	ifa->flags = flags | IFA_F_TENTATIVE;
	ifa->cstamp = ifa->tstamp = jiffies;

	ifa->rt = rt;

	ifa->idev = idev;
	in6_dev_hold(idev);
	/* For caller */
	in6_ifa_hold(ifa);

	/* Add to big hash table */
	hash = ipv6_addr_hash(addr);

	hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);

	write_lock(&idev->lock);
	/* Add to inet6_dev unicast addr list. */
	ipv6_link_dev_addr(idev, ifa);

#ifdef CONFIG_IPV6_PRIVACY
	if (ifa->flags&IFA_F_TEMPORARY) {
		list_add(&ifa->tmp_list, &idev->tempaddr_list);
		in6_ifa_hold(ifa);	/* reference held by tempaddr_list */
	}
#endif

	in6_ifa_hold(ifa);		/* reference held by addr_list */
	write_unlock(&idev->lock);
	spin_unlock(&addrconf_hash_lock);
out2:
	/* NOTE: "out" below jumps back here -- error paths also pass
	 * through this unlock/notify tail. */
	rcu_read_unlock_bh();

	if (likely(err == 0))
		atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
	else {
		/* ifa is either NULL or not yet published anywhere */
		kfree(ifa);
		ifa = ERR_PTR(err);
	}

	return ifa;
out:
	spin_unlock(&addrconf_hash_lock);
	goto out2;
}
/* This function wants to get referenced ifp and releases it before return */
static void ipv6_del_addr(struct inet6_ifaddr *ifp)
{
	struct inet6_ifaddr *ifa, *ifn;
	struct inet6_dev *idev = ifp->idev;
	int state;
	/* onlink: 0 = no other address covers the prefix, 1 = another
	 * permanent one does, -1 = only non-permanent ones do */
	int deleted = 0, onlink = 0;
	unsigned long expires = jiffies;

	spin_lock_bh(&ifp->state_lock);
	state = ifp->state;
	ifp->state = INET6_IFADDR_STATE_DEAD;
	spin_unlock_bh(&ifp->state_lock);

	if (state == INET6_IFADDR_STATE_DEAD)
		goto out;	/* someone else already tore it down */

	spin_lock_bh(&addrconf_hash_lock);
	hlist_del_init_rcu(&ifp->addr_lst);
	write_lock_bh(&idev->lock);
#ifdef CONFIG_IPV6_PRIVACY
	if (ifp->flags&IFA_F_TEMPORARY) {
		list_del(&ifp->tmp_list);
		if (ifp->ifpub) {
			in6_ifa_put(ifp->ifpub);
			ifp->ifpub = NULL;
		}
		__in6_ifa_put(ifp);	/* drop tempaddr_list reference */
	}
#endif

	/* one pass: unlink ifp and scan siblings for prefix coverage */
	list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
		if (ifa == ifp) {
			list_del_init(&ifp->if_list);
			__in6_ifa_put(ifp);

			/* only permanent addrs need the coverage scan */
			if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0)
				break;
			deleted = 1;
			continue;
		} else if (ifp->flags & IFA_F_PERMANENT) {
			if (ipv6_prefix_equal(&ifa->addr, &ifp->addr,
					      ifp->prefix_len)) {
				if (ifa->flags & IFA_F_PERMANENT) {
					onlink = 1;
					if (deleted)
						break;
				} else {
					unsigned long lifetime;

					if (!onlink)
						onlink = -1;

					spin_lock(&ifa->lock);

					lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
					/*
					 * Note: Because this address is
					 * not permanent, lifetime <
					 * LONG_MAX / HZ here.
					 */
					if (time_before(expires,
							ifa->tstamp + lifetime * HZ))
						expires = ifa->tstamp + lifetime * HZ;
					spin_unlock(&ifa->lock);
				}
			}
		}
	}
	write_unlock_bh(&idev->lock);
	spin_unlock_bh(&addrconf_hash_lock);

	addrconf_del_timer(ifp);

	ipv6_ifa_notify(RTM_DELADDR, ifp);

	atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);

	/*
	 * Purge or update corresponding prefix
	 *
	 * 1) we don't purge prefix here if address was not permanent.
	 *    prefix is managed by its own lifetime.
	 * 2) if there're no addresses, delete prefix.
	 * 3) if there're still other permanent address(es),
	 *    corresponding prefix is still permanent.
	 * 4) otherwise, update prefix lifetime to the
	 *    longest valid lifetime among the corresponding
	 *    addresses on the device.
	 *    Note: subsequent RA will update lifetime.
	 *
	 * --yoshfuji
	 */
	if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
		struct in6_addr prefix;
		struct rt6_info *rt;
		struct net *net = dev_net(ifp->idev->dev);
		struct flowi6 fl6 = {};

		ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
		fl6.flowi6_oif = ifp->idev->dev->ifindex;
		fl6.daddr = prefix;
		rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
							 RT6_LOOKUP_F_IFACE);

		if (rt != net->ipv6.ip6_null_entry &&
		    addrconf_is_prefix_route(rt)) {
			if (onlink == 0) {
				/* no coverage left: drop the prefix route */
				ip6_del_rt(rt);
				rt = NULL;
			} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
				/* demote to the longest remaining lifetime */
				rt6_set_expires(rt, expires);
			}
		}
		dst_release(&rt->dst);
	}

	/* clean up prefsrc entries */
	rt6_remove_prefsrc(ifp);
out:
	in6_ifa_put(ifp);
}
#ifdef CONFIG_IPV6_PRIVACY
/*
 * Create a privacy (temporary) address derived from public address
 * @ifp (RFC 3041/4941).  When regenerating after a DAD collision, @ift
 * is the colliding temporary address whose interface id must be
 * avoided; otherwise @ift is NULL.  Returns 0 on success, -1 when
 * temporary addresses are disabled or regeneration keeps failing.
 * Called with idev->lock write-held; the lock is dropped on return.
 */
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
{
	struct inet6_dev *idev = ifp->idev;
	struct in6_addr addr, *tmpaddr;
	unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
	unsigned long regen_advance;
	int tmp_plen;
	int ret = 0;
	int max_addresses;
	u32 addr_flags;
	unsigned long now = jiffies;

	write_lock(&idev->lock);
	if (ift) {
		/* remember the colliding interface id so the regen
		 * helper knows it must produce a different one */
		spin_lock_bh(&ift->lock);
		memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
		spin_unlock_bh(&ift->lock);
		tmpaddr = &addr;
	} else {
		tmpaddr = NULL;
	}
retry:
	in6_dev_hold(idev);
	if (idev->cnf.use_tempaddr <= 0) {
		write_unlock(&idev->lock);
		printk(KERN_INFO
			"ipv6_create_tempaddr(): use_tempaddr is disabled.\n");
		in6_dev_put(idev);
		ret = -1;
		goto out;
	}
	spin_lock_bh(&ifp->lock);
	if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
		/* give up on privacy addresses for this device */
		idev->cnf.use_tempaddr = -1;	/*XXX*/
		spin_unlock_bh(&ifp->lock);
		write_unlock(&idev->lock);
		printk(KERN_WARNING
			"ipv6_create_tempaddr(): regeneration time exceeded. disabled temporary address support.\n");
		in6_dev_put(idev);
		ret = -1;
		goto out;
	}
	in6_ifa_hold(ifp);
	/* temp addr = public prefix (8 bytes) + random rndid (8 bytes) */
	memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
	if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
		spin_unlock_bh(&ifp->lock);
		write_unlock(&idev->lock);
		printk(KERN_WARNING
			"ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n");
		in6_ifa_put(ifp);
		in6_dev_put(idev);
		ret = -1;
		goto out;
	}
	memcpy(&addr.s6_addr[8], idev->rndid, 8);
	/* lifetimes are capped by the public address's lifetimes */
	age = (now - ifp->tstamp) / HZ;
	tmp_valid_lft = min_t(__u32,
			      ifp->valid_lft,
			      idev->cnf.temp_valid_lft + age);
	tmp_prefered_lft = min_t(__u32,
				 ifp->prefered_lft,
				 idev->cnf.temp_prefered_lft + age -
				 idev->cnf.max_desync_factor);
	tmp_plen = ifp->prefix_len;
	max_addresses = idev->cnf.max_addresses;
	tmp_tstamp = ifp->tstamp;
	spin_unlock_bh(&ifp->lock);

	regen_advance = idev->cnf.regen_max_retry *
			idev->cnf.dad_transmits *
			idev->nd_parms->retrans_time / HZ;
	write_unlock(&idev->lock);

	/* A temporary address is created only if this calculated Preferred
	 * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
	 * an implementation must not create a temporary address with a zero
	 * Preferred Lifetime.
	 * Use age calculation as in addrconf_verify to avoid unnecessary
	 * temporary addresses being generated.
	 */
	age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
	if (tmp_prefered_lft <= regen_advance + age) {
		in6_ifa_put(ifp);
		in6_dev_put(idev);
		ret = -1;
		goto out;
	}

	addr_flags = IFA_F_TEMPORARY;
	/* set in addrconf_prefix_rcv() */
	if (ifp->flags & IFA_F_OPTIMISTIC)
		addr_flags |= IFA_F_OPTIMISTIC;

	ift = ipv6_add_addr(idev, &addr, tmp_plen,
			    ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
			    addr_flags);
	if (IS_ERR(ift)) {
		/* likely a collision: loop back and roll a new rndid */
		in6_ifa_put(ifp);
		in6_dev_put(idev);
		printk(KERN_INFO
			"ipv6_create_tempaddr(): retry temporary address regeneration.\n");
		tmpaddr = &addr;
		write_lock(&idev->lock);
		goto retry;
	}

	spin_lock_bh(&ift->lock);
	ift->ifpub = ifp;	/* keeps the caller's reference on ifp */
	ift->valid_lft = tmp_valid_lft;
	ift->prefered_lft = tmp_prefered_lft;
	ift->cstamp = now;
	ift->tstamp = tmp_tstamp;
	spin_unlock_bh(&ift->lock);

	addrconf_dad_start(ift, 0);
	in6_ifa_put(ift);
	in6_dev_put(idev);
out:
	return ret;
}
#endif
/*
 * Choose an appropriate source address (RFC3484)
 *
 * Rules are evaluated in this order; a later rule only decides ties
 * left by the earlier ones (see ipv6_get_saddr_eval()).
 */
enum {
	IPV6_SADDR_RULE_INIT = 0,	/* rule 0: any candidate at all */
	IPV6_SADDR_RULE_LOCAL,		/* rule 1: prefer same address */
	IPV6_SADDR_RULE_SCOPE,		/* rule 2: prefer appropriate scope */
	IPV6_SADDR_RULE_PREFERRED,	/* rule 3: avoid deprecated/optimistic */
#ifdef CONFIG_IPV6_MIP6
	IPV6_SADDR_RULE_HOA,		/* rule 4: prefer home address */
#endif
	IPV6_SADDR_RULE_OIF,		/* rule 5: prefer outgoing interface */
	IPV6_SADDR_RULE_LABEL,		/* rule 6: prefer matching label */
#ifdef CONFIG_IPV6_PRIVACY
	IPV6_SADDR_RULE_PRIVACY,	/* rule 7: public vs temporary */
#endif
	IPV6_SADDR_RULE_ORCHID,		/* rule 8-: ORCHID affinity */
	IPV6_SADDR_RULE_PREFIX,		/* rule 8: longest matching prefix */
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	IPV6_SADDR_RULE_NOT_OPTIMISTIC,	/* demote optimistic addresses */
#endif
	IPV6_SADDR_RULE_MAX
};
/* Running score of one candidate source address during selection. */
struct ipv6_saddr_score {
	int rule;			/* highest rule evaluated so far */
	int addr_type;			/* cached __ipv6_addr_type() */
	struct inet6_ifaddr *ifa;	/* the candidate itself */
	DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);	/* per-rule results */
	int scopedist;			/* rule 2 result (scope distance) */
	int matchlen;			/* rule 8 result (common prefix len) */
};
/* Destination properties the candidates are scored against. */
struct ipv6_saddr_dst {
	const struct in6_addr *addr;	/* destination address */
	int ifindex;			/* outgoing interface, 0 = any */
	int scope;			/* destination source scope */
	int label;			/* RFC3484 policy label */
	unsigned int prefs;		/* IPV6_PREFER_SRC_* flags */
};
/* Addresses of these types are always treated as "preferred" by rule 3. */
static inline int ipv6_saddr_preferred(int type)
{
	return !!(type & (IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 |
			  IPV6_ADDR_LOOPBACK));
}
/* True when optimistic (DAD-in-progress) addresses may be used as
 * source addresses: requires CONFIG_IPV6_OPTIMISTIC_DAD and both the
 * optimistic_dad and use_optimistic sysctls enabled on the device. */
static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
#else
	return false;
#endif
}
/*
 * Evaluate RFC3484 rule @i for candidate @score against @dst.  Rules
 * already computed (i <= score->rule) are replayed from the cached
 * scorebits/scopedist/matchlen instead of being re-evaluated, so the
 * caller can compare two candidates rule by rule.
 */
static int ipv6_get_saddr_eval(struct net *net,
			       struct ipv6_saddr_score *score,
			       struct ipv6_saddr_dst *dst,
			       int i)
{
	int ret;

	if (i <= score->rule) {
		/* replay a previously computed rule */
		switch (i) {
		case IPV6_SADDR_RULE_SCOPE:
			ret = score->scopedist;
			break;
		case IPV6_SADDR_RULE_PREFIX:
			ret = score->matchlen;
			break;
		default:
			ret = !!test_bit(i, score->scorebits);
		}
		goto out;
	}

	switch (i) {
	case IPV6_SADDR_RULE_INIT:
		/* Rule 0: remember if hiscore is not ready yet */
		ret = !!score->ifa;
		break;
	case IPV6_SADDR_RULE_LOCAL:
		/* Rule 1: Prefer same address */
		ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
		break;
	case IPV6_SADDR_RULE_SCOPE:
		/* Rule 2: Prefer appropriate scope
		 *
		 *      ret
		 *       ^
		 *    -1 |  d 15
		 *    ---+--+-+---> scope
		 *       |
		 *       |             d is scope of the destination.
		 *  B-d  |  \
		 *       |   \      <- smaller scope is better if
		 *  B-15 |    \        if scope is enough for destinaion.
		 *       |             ret = B - scope (-1 <= scope >= d <= 15).
		 * d-C-1 |  /
		 *       |_/        <- greater is better
		 *   -C  /             if scope is not enough for destination.
		 *      /|             ret = scope - C (-1 <= d < scope <= 15).
		 *
		 * d - C - 1 < B -15 (for all -1 <= d <= 15).
		 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
		 * Assume B = 0 and we get C > 29.
		 */
		ret = __ipv6_addr_src_scope(score->addr_type);
		if (ret >= dst->scope)
			ret = -ret;
		else
			ret -= 128;	/* 30 is enough */
		score->scopedist = ret;
		break;
	case IPV6_SADDR_RULE_PREFERRED:
	    {
		/* Rule 3: Avoid deprecated and optimistic addresses */
		u8 avoid = IFA_F_DEPRECATED;

		if (!ipv6_use_optimistic_addr(score->ifa->idev))
			avoid |= IFA_F_OPTIMISTIC;
		ret = ipv6_saddr_preferred(score->addr_type) ||
		      !(score->ifa->flags & avoid);
		break;
	    }
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SADDR_RULE_HOA:
	    {
		/* Rule 4: Prefer home address */
		int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
		ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
		break;
	    }
#endif
	case IPV6_SADDR_RULE_OIF:
		/* Rule 5: Prefer outgoing interface */
		ret = (!dst->ifindex ||
		       dst->ifindex == score->ifa->idev->dev->ifindex);
		break;
	case IPV6_SADDR_RULE_LABEL:
		/* Rule 6: Prefer matching label */
		ret = ipv6_addr_label(net,
				      &score->ifa->addr, score->addr_type,
				      score->ifa->idev->dev->ifindex) == dst->label;
		break;
#ifdef CONFIG_IPV6_PRIVACY
	case IPV6_SADDR_RULE_PRIVACY:
	    {
		/* Rule 7: Prefer public address
		 * Note: prefer temporary address if use_tempaddr >= 2
		 */
		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
				!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
				score->ifa->idev->cnf.use_tempaddr >= 2;
		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
		break;
	    }
#endif
	case IPV6_SADDR_RULE_ORCHID:
		/* Rule 8-: Prefer ORCHID vs ORCHID or
		 *	    non-ORCHID vs non-ORCHID
		 */
		ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
			ipv6_addr_orchid(dst->addr));
		break;
	case IPV6_SADDR_RULE_PREFIX:
		/* Rule 8: Use longest matching prefix */
		score->matchlen = ret = ipv6_addr_diff(&score->ifa->addr,
						       dst->addr);
		break;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
		/* Optimistic addresses still have lower precedence than other
		 * preferred addresses.
		 */
		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
		break;
#endif
	default:
		ret = 0;
	}

	if (ret)
		__set_bit(i, score->scorebits);
	score->rule = i;
out:
	return ret;
}
/*
 * Select a source address for @daddr per RFC 3484, scanning all
 * devices in @net (restricted to @dst_dev for link-local/multicast
 * destinations).  The winner is copied into @saddr.  Returns 0 or
 * -EADDRNOTAVAIL when no usable address exists.
 */
int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
		       const struct in6_addr *daddr, unsigned int prefs,
		       struct in6_addr *saddr)
{
	/* two score slots: current candidate and best-so-far, swapped
	 * by pointer when the candidate wins */
	struct ipv6_saddr_score scores[2],
				*score = &scores[0], *hiscore = &scores[1];
	struct ipv6_saddr_dst dst;
	struct net_device *dev;
	int dst_type;

	dst_type = __ipv6_addr_type(daddr);
	dst.addr = daddr;
	dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
	dst.scope = __ipv6_addr_src_scope(dst_type);
	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
	dst.prefs = prefs;

	hiscore->rule = -1;
	hiscore->ifa = NULL;

	rcu_read_lock();

	for_each_netdev_rcu(net, dev) {
		struct inet6_dev *idev;

		/* Candidate Source Address (section 4)
		 *  - multicast and link-local destination address,
		 *    the set of candidate source address MUST only
		 *    include addresses assigned to interfaces
		 *    belonging to the same link as the outgoing
		 *    interface.
		 * (- For site-local destination addresses, the
		 *    set of candidate source addresses MUST only
		 *    include addresses assigned to interfaces
		 *    belonging to the same site as the outgoing
		 *    interface.)
		 */
		if (((dst_type & IPV6_ADDR_MULTICAST) ||
		     dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
		    dst.ifindex && dev->ifindex != dst.ifindex)
			continue;

		idev = __in6_dev_get(dev);
		if (!idev)
			continue;

		read_lock_bh(&idev->lock);
		list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
			int i;

			/*
			 * - Tentative Address (RFC2462 section 5.4)
			 *  - A tentative address is not considered
			 *    "assigned to an interface" in the traditional
			 *    sense, unless it is also flagged as optimistic.
			 * - Candidate Source Address (section 4)
			 *  - In any case, anycast addresses, multicast
			 *    addresses, and the unspecified address MUST
			 *    NOT be included in a candidate set.
			 */
			if ((score->ifa->flags & IFA_F_TENTATIVE) &&
			    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
				continue;

			score->addr_type = __ipv6_addr_type(&score->ifa->addr);

			if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
				     score->addr_type & IPV6_ADDR_MULTICAST)) {
				LIMIT_NETDEBUG(KERN_DEBUG
					       "ADDRCONF: unspecified / multicast address "
					       "assigned as unicast address on %s",
					       dev->name);
				continue;
			}

			score->rule = -1;
			bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);

			/* compare rule by rule; first differing rule wins */
			for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
				int minihiscore, miniscore;

				minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
				miniscore = ipv6_get_saddr_eval(net, score, &dst, i);

				if (minihiscore > miniscore) {
					if (i == IPV6_SADDR_RULE_SCOPE &&
					    score->scopedist > 0) {
						/*
						 * special case:
						 * each remaining entry
						 * has too small (not enough)
						 * scope, because ifa entries
						 * are sorted by their scope
						 * values.
						 */
						goto try_nextdev;
					}
					break;
				} else if (minihiscore < miniscore) {
					if (hiscore->ifa)
						in6_ifa_put(hiscore->ifa);

					in6_ifa_hold(score->ifa);

					swap(hiscore, score);

					/* restore our iterator */
					score->ifa = hiscore->ifa;

					break;
				}
			}
		}
try_nextdev:
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();

	if (!hiscore->ifa)
		return -EADDRNOTAVAIL;

	*saddr = hiscore->ifa->addr;
	in6_ifa_put(hiscore->ifa);
	return 0;
}
EXPORT_SYMBOL(ipv6_dev_get_saddr);
/*
 * Copy the first link-local address of @idev whose flags do not
 * intersect @banned_flags into @addr.  Caller holds idev->lock.
 * Returns 0 on success, -EADDRNOTAVAIL when none is found.
 */
int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_ifaddr *ifp;

	list_for_each_entry(ifp, &idev->addr_list, if_list) {
		if (ifp->scope != IFA_LINK)
			continue;
		if (ifp->flags & banned_flags)
			continue;
		*addr = ifp->addr;
		return 0;
	}
	return -EADDRNOTAVAIL;
}
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
unsigned char banned_flags)
{
struct inet6_dev *idev;
int err = -EADDRNOTAVAIL;
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
err = __ipv6_get_lladdr(idev, addr, banned_flags);
read_unlock_bh(&idev->lock);
}
rcu_read_unlock();
return err;
}
static int ipv6_count_addresses(struct inet6_dev *idev)
{
int cnt = 0;
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list)
cnt++;
read_unlock_bh(&idev->lock);
return cnt;
}
/*
 * Check whether @addr is assigned in @net.  With @strict, the address
 * must live on @dev; otherwise a global-scope match on any device is
 * accepted.  Tentative addresses only count when they are optimistic
 * and optimistic use is enabled on the device.  Returns 1 if found.
 */
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
		  const struct net_device *dev, int strict)
{
	struct inet6_ifaddr *ifp;
	struct hlist_node *node;
	unsigned int hash = ipv6_addr_hash(addr);

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
		if (!net_eq(dev_net(ifp->idev->dev), net))
			continue;
		if (ipv6_addr_equal(&ifp->addr, addr) &&
		    (!(ifp->flags&IFA_F_TENTATIVE) ||
		     (ipv6_use_optimistic_addr(ifp->idev) &&
		      ifp->flags&IFA_F_OPTIMISTIC)) &&
		    (dev == NULL || ifp->idev->dev == dev ||
		     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
			rcu_read_unlock_bh();
			return 1;
		}
	}

	rcu_read_unlock_bh();
	return 0;
}
EXPORT_SYMBOL(ipv6_chk_addr);
/*
 * True when @addr is already assigned in @net -- on @dev if given,
 * on any device otherwise.  Caller must hold addrconf_hash_lock or
 * otherwise keep the hash stable.
 */
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
			       struct net_device *dev)
{
	struct inet6_ifaddr *ifp;
	struct hlist_node *node;
	unsigned int hash = ipv6_addr_hash(addr);

	hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
		if (!net_eq(dev_net(ifp->idev->dev), net))
			continue;
		if (!ipv6_addr_equal(&ifp->addr, addr))
			continue;
		if (dev == NULL || ifp->idev->dev == dev)
			return true;
	}
	return false;
}
int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
{
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
int onlink;
onlink = 0;
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
onlink = ipv6_prefix_equal(addr, &ifa->addr,
ifa->prefix_len);
if (onlink)
break;
}
read_unlock_bh(&idev->lock);
}
rcu_read_unlock();
return onlink;
}
EXPORT_SYMBOL(ipv6_chk_prefix);
/*
 * Look up @addr in @net and return the matching inet6_ifaddr with an
 * extra reference held (caller must in6_ifa_put()), or NULL.  With
 * @strict, the match must be on @dev; otherwise a global-scope match
 * on any device is accepted.
 */
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
				     struct net_device *dev, int strict)
{
	struct inet6_ifaddr *ifp, *result = NULL;
	unsigned int hash = ipv6_addr_hash(addr);
	struct hlist_node *node;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
		if (!net_eq(dev_net(ifp->idev->dev), net))
			continue;
		if (ipv6_addr_equal(&ifp->addr, addr)) {
			if (dev == NULL || ifp->idev->dev == dev ||
			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
				result = ifp;
				in6_ifa_hold(ifp);	/* reference for caller */
				break;
			}
		}
	}
	rcu_read_unlock_bh();

	return result;
}
/* Gets referenced address, destroys ifaddr */
static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
	if (ifp->flags&IFA_F_PERMANENT) {
		/* permanent addresses are kept but marked tentative
		 * (and DADFAILED) rather than removed */
		spin_lock_bh(&ifp->lock);
		addrconf_del_timer(ifp);
		ifp->flags |= IFA_F_TENTATIVE;
		if (dad_failed)
			ifp->flags |= IFA_F_DADFAILED;
		spin_unlock_bh(&ifp->lock);
		if (dad_failed)
			ipv6_ifa_notify(0, ifp);
		in6_ifa_put(ifp);
#ifdef CONFIG_IPV6_PRIVACY
	} else if (ifp->flags&IFA_F_TEMPORARY) {
		/* regenerate a new temporary address from the public
		 * one before deleting the failed one */
		struct inet6_ifaddr *ifpub;
		spin_lock_bh(&ifp->lock);
		ifpub = ifp->ifpub;
		if (ifpub) {
			in6_ifa_hold(ifpub);
			spin_unlock_bh(&ifp->lock);
			ipv6_create_tempaddr(ifpub, ifp);
			in6_ifa_put(ifpub);
		} else {
			spin_unlock_bh(&ifp->lock);
		}
		ipv6_del_addr(ifp);
#endif
	} else
		ipv6_del_addr(ifp);
}
static int addrconf_dad_end(struct inet6_ifaddr *ifp)
{
int err = -ENOENT;
spin_lock(&ifp->state_lock);
if (ifp->state == INET6_IFADDR_STATE_DAD) {
ifp->state = INET6_IFADDR_STATE_POSTDAD;
err = 0;
}
spin_unlock(&ifp->state_lock);
return err;
}
/*
 * Handle a detected duplicate address.  If accept_dad > 1 and the
 * duplicate is the EUI-64 derived link-local address, the collision is
 * on the MAC itself, so IPv6 is disabled on the device entirely.
 * Consumes the caller's reference on @ifp.
 */
void addrconf_dad_failure(struct inet6_ifaddr *ifp)
{
	struct inet6_dev *idev = ifp->idev;

	if (addrconf_dad_end(ifp)) {
		/* address already left DAD state; nothing to do */
		in6_ifa_put(ifp);
		return;
	}

	if (net_ratelimit())
		printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
			ifp->idev->dev->name, &ifp->addr);

	if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
		struct in6_addr addr;

		addr.s6_addr32[0] = htonl(0xfe800000);
		addr.s6_addr32[1] = 0;

		if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
		    ipv6_addr_equal(&ifp->addr, &addr)) {
			/* DAD failed for link-local based on MAC address */
			idev->cnf.disable_ipv6 = 1;

			printk(KERN_INFO "%s: IPv6 being disabled!\n",
				ifp->idev->dev->name);
		}
	}

	addrconf_dad_stop(ifp, 1);
}
/* Join to solicited addr multicast group. */
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
	struct in6_addr maddr;

	/* loopback and NOARP devices do not run neighbour discovery */
	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
		return;

	addrconf_addr_solict_mult(addr, &maddr);
	ipv6_dev_mc_inc(dev, &maddr);
}
/* Leave the solicited-node multicast group of @addr. */
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct in6_addr maddr;

	/* we never joined on loopback/NOARP devices */
	if (idev->dev->flags & (IFF_LOOPBACK | IFF_NOARP))
		return;

	addrconf_addr_solict_mult(addr, &maddr);
	__ipv6_dev_mc_dec(idev, &maddr);
}
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
if (ifp->prefix_len == 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
return;
ipv6_dev_ac_inc(ifp->idev->dev, &addr);
}
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
if (ifp->prefix_len == 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
return;
__ipv6_dev_ac_dec(ifp->idev, &addr);
}
/*
 * Build a modified EUI-64 interface identifier from a 48-bit MAC:
 * OUI goes into eui[0..2], the NIC part into eui[5..7], with either
 * 0xFFFE or a per-instance dev_id in the middle (eui[3..4]).
 */
static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
{
	if (dev->addr_len != ETH_ALEN)
		return -1;
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);

	/*
	 * The zSeries OSA network cards can be shared among various
	 * OS instances, but the OSA cards have only one MAC address.
	 * This leads to duplicate address conflicts in conjunction
	 * with IPv6 if more than one instance uses the same card.
	 *
	 * The driver for these cards can deliver a unique 16-bit
	 * identifier for each instance sharing the same card.  It is
	 * placed instead of 0xFFFE in the interface identifier.  The
	 * "u" bit of the interface identifier is not inverted in this
	 * case.  Hence the resulting interface identifier has local
	 * scope according to RFC2373.
	 */
	if (dev->dev_id) {
		eui[3] = (dev->dev_id >> 8) & 0xFF;
		eui[4] = dev->dev_id & 0xFF;
	} else {
		eui[3] = 0xFF;
		eui[4] = 0xFE;
		eui[0] ^= 2;	/* flip the universal/local bit */
	}
	return 0;
}
/* Derive an interface id from a 1-byte ARCnet hardware address. */
static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
{
	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
	const u8 *hwaddr = dev->dev_addr;

	if (dev->addr_len != ARCNET_ALEN)
		return -1;
	memset(eui, 0, 7);
	eui[7] = hwaddr[0];
	return 0;
}
/*
 * Derive an interface id from an InfiniBand hardware address: the low
 * 8 bytes of the 20-byte address, with the universal/local bit set.
 */
static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
{
	if (dev->addr_len != INFINIBAND_ALEN)
		return -1;
	memcpy(eui, dev->dev_addr + 12, 8);
	eui[0] |= 2;
	return 0;
}
/*
 * Build an ISATAP interface id (RFC 5214): 00:00:5E:FE followed by the
 * embedded IPv4 address.  The leading byte carries the u/g bit only
 * when the IPv4 address is globally unique -- it is cleared for every
 * special-use (private, loopback, link-local, test, multicast, ...)
 * range.
 */
static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
{
	if (addr == 0)
		return -1;
	eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
		  ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
		  ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
		  ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
		  ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
		  ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
	eui[1] = 0;
	eui[2] = 0x5E;
	eui[3] = 0xFE;
	memcpy(eui + 4, &addr, 4);
	return 0;
}
/* SIT devices only get an ISATAP-style interface id when in ISATAP mode. */
static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
{
	if (!(dev->priv_flags & IFF_ISATAP))
		return -1;
	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
/* GRE tunnels embed their IPv4 endpoint like an ISATAP interface id. */
static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
{
	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
/*
 * Generate an EUI-64 interface identifier for @dev based on its link
 * type.  Returns 0 on success, -1 for unsupported device types.
 */
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
	switch (dev->type) {
	case ARPHRD_ETHER:
	case ARPHRD_FDDI:
	case ARPHRD_IEEE802_TR:
		return addrconf_ifid_eui48(eui, dev);
	case ARPHRD_ARCNET:
		return addrconf_ifid_arcnet(eui, dev);
	case ARPHRD_INFINIBAND:
		return addrconf_ifid_infiniband(eui, dev);
	case ARPHRD_SIT:
		return addrconf_ifid_sit(eui, dev);
	case ARPHRD_IPGRE:
		return addrconf_ifid_gre(eui, dev);
	case ARPHRD_RAWIP: {
		/* vendor addition: raw-IP devices (e.g. cellular data)
		 * reuse an existing link-local id, else a random one */
		struct in6_addr lladdr;

		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
			get_random_bytes(eui, 8);
		else
			memcpy(eui, lladdr.s6_addr + 8, 8);
		return 0;
	}
	}
	return -1;
}
/*
 * Copy the interface identifier (low 8 bytes) of the first usable
 * link-local address on @idev into @eui.  Returns 0 on success, -1
 * when no non-tentative link-local address exists.
 */
static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
{
	struct inet6_ifaddr *ifp;
	int err = -1;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifp, &idev->addr_list, if_list) {
		if (ifp->scope != IFA_LINK || (ifp->flags & IFA_F_TENTATIVE))
			continue;
		memcpy(eui, ifp->addr.s6_addr + 8, 8);
		err = 0;
		break;
	}
	read_unlock_bh(&idev->lock);
	return err;
}
#ifdef CONFIG_IPV6_PRIVACY
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
static int __ipv6_regen_rndid(struct inet6_dev *idev)
{
regen:
	get_random_bytes(idev->rndid, sizeof(idev->rndid));
	idev->rndid[0] &= ~0x02;	/* clear the universal/local bit */

	/*
	 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
	 * check if generated address is not inappropriate
	 *
	 *  - Reserved subnet anycast (RFC 2526)
	 *	11111101 11....11 1xxxxxxx
	 *  - ISATAP (RFC4214) 6.1
	 *	00-00-5E-FE-xx-xx-xx-xx
	 *  - value 0
	 *  - XXX: already assigned to an address on the device
	 */

	/* reserved subnet anycast pattern? roll again */
	if (idev->rndid[0] == 0xfd &&
	    (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
	    (idev->rndid[7]&0x80))
		goto regen;
	if ((idev->rndid[0]|idev->rndid[1]) == 0) {
		/* ISATAP id or the all-zero id? roll again */
		if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
			goto regen;
		if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
			goto regen;
	}

	return 0;
}
/*
 * Timer callback: periodically regenerate the randomized interface id
 * and rearm the timer for the next regeneration.  @data is the
 * inet6_dev; the armed timer owns one reference on it, which is
 * dropped here and retaken via in6_dev_hold() when rearming succeeds.
 */
static void ipv6_regen_rndid(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *) data;
	unsigned long expires;

	rcu_read_lock_bh();
	write_lock_bh(&idev->lock);

	if (idev->dead)
		goto out;

	if (__ipv6_regen_rndid(idev) < 0)
		goto out;

	/* next regen: preferred lifetime minus the DAD window and the
	 * desynchronization factor */
	expires = jiffies +
		idev->cnf.temp_prefered_lft * HZ -
		idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
		idev->cnf.max_desync_factor * HZ;
	if (time_before(expires, jiffies)) {
		printk(KERN_WARNING
			"ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
			idev->dev->name);
		goto out;
	}

	if (!mod_timer(&idev->regen_timer, expires))
		in6_dev_hold(idev);

out:
	write_unlock_bh(&idev->lock);
	rcu_read_unlock_bh();
	in6_dev_put(idev);
}
/*
 * Regenerate the randomized interface identifier, but only when the
 * temporary address @tmpaddr still carries the current identifier.
 * Returns the result of __ipv6_regen_rndid(), or 0 when nothing to do.
 */
static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) {
        if (!tmpaddr)
                return 0;
        if (memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) != 0)
                return 0;
        return __ipv6_regen_rndid(idev);
}
#endif
/*
 * Determines into what table to put autoconf PIO/RIO/default routes
 * learned on this device, from the per-device accept_ra_rt_table sysctl:
 *
 *  - If 0, use @default_table for every device (one of
 *    RT_TABLE_{PREFIX,INFO,DFLT}, currently all equal to RT6_TABLE_MAIN).
 *  - If > 0, use the specified table.
 *  - If < 0, put routes into table dev->ifindex + (-rt_table).
 */
u32 addrconf_rt_table(const struct net_device *dev, u32 default_table) {
        struct inet6_dev *idev = in6_dev_get(dev);
        int sysctl = idev->cnf.accept_ra_rt_table;
        u32 table = default_table;

        if (sysctl > 0)
                table = (u32) sysctl;
        else if (sysctl < 0)
                table = (unsigned) dev->ifindex + (-sysctl);

        in6_dev_put(idev);
        return table;
}
/*
 * Add prefix route: install pfx/plen as an on-link route via @dev into
 * the table chosen by addrconf_rt_table(), with the given expiry and
 * RTF_* flags.
 */
static void
addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                      unsigned long expires, u32 flags)
{
        struct fib6_config cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX);
        cfg.fc_metric = IP6_RT_PRIO_ADDRCONF;
        cfg.fc_ifindex = dev->ifindex;
        cfg.fc_expires = expires;
        cfg.fc_dst_len = plen;
        cfg.fc_flags = RTF_UP | flags;
        cfg.fc_nlinfo.nl_net = dev_net(dev);
        cfg.fc_protocol = RTPROT_KERNEL;
        cfg.fc_dst = *pfx;

        /* Prevent useless cloning on PtP SIT.
           This thing is done here expecting that the whole
           class of non-broadcast devices need not cloning.
         */
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
        if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
                cfg.fc_flags |= RTF_NONEXTHOP;
#endif

        ip6_route_add(&cfg);
}
/*
 * Look up the existing prefix route for pfx/plen on @dev whose flags
 * include all of @flags and none of @noflags.  Returns the route with a
 * dst reference held (caller must dst_release()), or NULL.
 */
static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                                                  int plen,
                                                  const struct net_device *dev,
                                                  u32 flags, u32 noflags)
{
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;

        /* Same table the autoconf prefix routes are installed into. */
        table = fib6_get_table(dev_net(dev),
                               addrconf_rt_table(dev, RT6_TABLE_PREFIX));
        if (table == NULL)
                return NULL;

        write_lock_bh(&table->tb6_lock);
        fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
        if (!fn)
                goto out;
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
                if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
                if ((rt->rt6i_flags & flags) != flags)
                        continue;
                if ((rt->rt6i_flags & noflags) != 0)
                        continue;
                dst_hold(&rt->dst);
                break;
        }
out:
        write_unlock_bh(&table->tb6_lock);
        return rt;
}
/* Create "default" multicast route (ff00::/8) to the interface. */
static void addrconf_add_mroute(struct net_device *dev)
{
        struct fib6_config cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.fc_table = RT6_TABLE_LOCAL;
        cfg.fc_metric = IP6_RT_PRIO_ADDRCONF;
        cfg.fc_ifindex = dev->ifindex;
        cfg.fc_dst_len = 8;
        cfg.fc_flags = RTF_UP;
        cfg.fc_nlinfo.nl_net = dev_net(dev);
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);

        ip6_route_add(&cfg);
}
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
/* Install the compat route ::/96 ("::d.d.d.d") for a SIT device. */
static void sit_route_add(struct net_device *dev)
{
        struct fib6_config cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.fc_table = RT6_TABLE_MAIN;
        cfg.fc_metric = IP6_RT_PRIO_ADDRCONF;
        cfg.fc_ifindex = dev->ifindex;
        /* prefix length - 96 bits "::d.d.d.d" */
        cfg.fc_dst_len = 96;
        cfg.fc_flags = RTF_UP | RTF_NONEXTHOP;
        cfg.fc_nlinfo.nl_net = dev_net(dev);

        ip6_route_add(&cfg);
}
#endif
static void addrconf_add_lroute(struct net_device *dev)
{
struct in6_addr addr;
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
addrconf_prefix_route(&addr, 64, dev, 0, 0);
}
static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
{
struct inet6_dev *idev;
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
if (!idev)
return ERR_PTR(-ENOBUFS);
if (idev->cnf.disable_ipv6)
return ERR_PTR(-EACCES);
/* Add default multicast route */
if (!(dev->flags & IFF_LOOPBACK))
addrconf_add_mroute(dev);
/* Add link local route */
addrconf_add_lroute(dev);
return idev;
}
/*
 * Handle a Prefix Information option from a Router Advertisement
 * (RFC 4861, RFC 4862 section 5.5.3).  Two things going on here:
 *  1) maintain the on-link prefix route when the L-flag is set;
 *  2) auto-configure / refresh addresses when the A-flag is set,
 *     including the RFC 4941 temporary addresses derived from them.
 * @sllao: a source link-layer address option was present (affects
 * optimistic DAD eligibility).
 */
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
        struct prefix_info *pinfo;
        __u32 valid_lft;
        __u32 prefered_lft;
        int addr_type;
        struct inet6_dev *in6_dev;
        struct net *net = dev_net(dev);

#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
        printk(KERN_DEBUG "[LGE_DATA][%s()] The prefix is received now !", __func__);
#endif

        pinfo = (struct prefix_info *) opt;

        if (len < sizeof(struct prefix_info)) {
                ADBG(("addrconf: prefix option too short\n"));
                return;
        }

        /*
         *      Validation checks ([ADDRCONF], page 19)
         */

        addr_type = ipv6_addr_type(&pinfo->prefix);

        /* Multicast and link-local prefixes must never be autoconfigured. */
        if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
                return;

        valid_lft = ntohl(pinfo->valid);
        prefered_lft = ntohl(pinfo->prefered);

        if (prefered_lft > valid_lft) {
                if (net_ratelimit())
                        printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n");
                return;
        }

        in6_dev = in6_dev_get(dev);

        if (in6_dev == NULL) {
                if (net_ratelimit())
                        printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name);
                return;
        }

        /*
         *      Two things going on here:
         *      1) Add routes for on-link prefixes
         *      2) Configure prefixes with the auto flag set
         */

        if (pinfo->onlink) {
                struct rt6_info *rt;
                unsigned long rt_expires;

                /* Avoid arithmetic overflow. Really, we could
                 * save rt_expires in seconds, likely valid_lft,
                 * but it would require division in fib gc, that it
                 * not good.
                 */
                if (HZ > USER_HZ)
                        rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
                else
                        rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);

                if (addrconf_finite_timeout(rt_expires))
                        rt_expires *= HZ;

                rt = addrconf_get_prefix_route(&pinfo->prefix,
                                               pinfo->prefix_len,
                                               dev,
                                               RTF_ADDRCONF | RTF_PREFIX_RT,
                                               RTF_GATEWAY | RTF_DEFAULT);

                if (rt) {
                        /* Autoconf prefix route */
                        if (valid_lft == 0) {
                                /* Zero lifetime withdraws the route. */
                                ip6_del_rt(rt);
                                rt = NULL;
                        } else if (addrconf_finite_timeout(rt_expires)) {
                                /* not infinity */
                                rt6_set_expires(rt, jiffies + rt_expires);
                        } else {
                                rt6_clean_expires(rt);
                        }
                } else if (valid_lft) {
                        clock_t expires = 0;
                        int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
                        if (addrconf_finite_timeout(rt_expires)) {
                                /* not infinity */
                                flags |= RTF_EXPIRES;
                                expires = jiffies_to_clock_t(rt_expires);
                        }
                        if (dev->ip6_ptr->cnf.accept_ra_prefix_route) {
                                addrconf_prefix_route(&pinfo->prefix,
                                        pinfo->prefix_len, dev, expires, flags);
                        }
                }
                if (rt)
                        dst_release(&rt->dst);
        }

        /* Try to figure out our local address for this prefix */

        if (pinfo->autoconf && in6_dev->cnf.autoconf) {
                struct inet6_ifaddr * ifp;
                struct in6_addr addr;
                int create = 0, update_lft = 0;

                /* Only /64 prefixes can carry an EUI-64 suffix. */
                if (pinfo->prefix_len == 64) {
                        memcpy(&addr, &pinfo->prefix, 8);
                        if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
                            ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
                                in6_dev_put(in6_dev);
                                return;
                        }
                        goto ok;
                }
                if (net_ratelimit())
                        printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n",
                               pinfo->prefix_len);
                in6_dev_put(in6_dev);
                return;

ok:

                ifp = ipv6_get_ifaddr(net, &addr, dev, 1);

                if (ifp == NULL && valid_lft) {
                        int max_addresses = in6_dev->cnf.max_addresses;
                        u32 addr_flags = 0;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
                        if (in6_dev->cnf.optimistic_dad &&
                            !net->ipv6.devconf_all->forwarding && sllao)
                                addr_flags = IFA_F_OPTIMISTIC;
#endif

                        /* Do not allow to create too much of autoconfigured
                         * addresses; this would be too easy way to crash kernel.
                         */
                        if (!max_addresses ||
                            ipv6_count_addresses(in6_dev) < max_addresses)
                                ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
                                                    addr_type&IPV6_ADDR_SCOPE_MASK,
                                                    addr_flags);

                        if (!ifp || IS_ERR(ifp)) {
                                in6_dev_put(in6_dev);
                                return;
                        }

                        update_lft = create = 1;
                        ifp->cstamp = jiffies;
                        addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT);
                }

                if (ifp) {
                        int flags;
                        unsigned long now;
#ifdef CONFIG_IPV6_PRIVACY
                        struct inet6_ifaddr *ift;
#endif
                        u32 stored_lft;

                        /* update lifetime (RFC2462 5.5.3 e) */
                        spin_lock(&ifp->lock);
                        now = jiffies;
                        if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
                                stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
                        else
                                stored_lft = 0;
                        if (!update_lft && stored_lft) {
                                if (valid_lft > MIN_VALID_LIFETIME ||
                                    valid_lft > stored_lft)
                                        update_lft = 1;
                                else if (stored_lft <= MIN_VALID_LIFETIME) {
                                        /* valid_lft <= stored_lft is always true */
                                        /*
                                         * RFC 4862 Section 5.5.3e:
                                         * "Note that the preferred lifetime of
                                         *  the corresponding address is always
                                         *  reset to the Preferred Lifetime in
                                         *  the received Prefix Information
                                         *  option, regardless of whether the
                                         *  valid lifetime is also reset or
                                         *  ignored."
                                         *
                                         *  So if the preferred lifetime in
                                         *  this advertisement is different
                                         *  than what we have stored, but the
                                         *  valid lifetime is invalid, just
                                         *  reset prefered_lft.
                                         *
                                         *  We must set the valid lifetime
                                         *  to the stored lifetime since we'll
                                         *  be updating the timestamp below,
                                         *  else we'll set it back to the
                                         *  minimum.
                                         */
                                        if (prefered_lft != ifp->prefered_lft) {
                                                valid_lft = stored_lft;
                                                update_lft = 1;
                                        }
                                } else {
                                        valid_lft = MIN_VALID_LIFETIME;
                                        if (valid_lft < prefered_lft)
                                                prefered_lft = valid_lft;
                                        update_lft = 1;
                                }
                        }

                        if (update_lft) {
                                ifp->valid_lft = valid_lft;
                                ifp->prefered_lft = prefered_lft;
                                ifp->tstamp = now;
                                flags = ifp->flags;
                                ifp->flags &= ~IFA_F_DEPRECATED;
                                spin_unlock(&ifp->lock);

                                if (!(flags&IFA_F_TENTATIVE))
                                        ipv6_ifa_notify(0, ifp);
                        } else
                                spin_unlock(&ifp->lock);

#ifdef CONFIG_IPV6_PRIVACY
                        read_lock_bh(&in6_dev->lock);
                        /* update all temporary addresses in the list */
                        list_for_each_entry(ift, &in6_dev->tempaddr_list,
                                            tmp_list) {
                                int age, max_valid, max_prefered;

                                if (ifp != ift->ifpub)
                                        continue;

                                /*
                                 * RFC 4941 section 3.3:
                                 * If a received option will extend the lifetime
                                 * of a public address, the lifetimes of
                                 * temporary addresses should be extended,
                                 * subject to the overall constraint that no
                                 * temporary addresses should ever remain
                                 * "valid" or "preferred" for a time longer than
                                 * (TEMP_VALID_LIFETIME) or
                                 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
                                 * respectively.
                                 */
                                age = (now - ift->cstamp) / HZ;
                                max_valid = in6_dev->cnf.temp_valid_lft - age;
                                if (max_valid < 0)
                                        max_valid = 0;

                                max_prefered = in6_dev->cnf.temp_prefered_lft -
                                               in6_dev->cnf.max_desync_factor -
                                               age;
                                if (max_prefered < 0)
                                        max_prefered = 0;

                                if (valid_lft > max_valid)
                                        valid_lft = max_valid;

                                if (prefered_lft > max_prefered)
                                        prefered_lft = max_prefered;

                                spin_lock(&ift->lock);
                                flags = ift->flags;
                                ift->valid_lft = valid_lft;
                                ift->prefered_lft = prefered_lft;
                                ift->tstamp = now;
                                if (prefered_lft > 0)
                                        ift->flags &= ~IFA_F_DEPRECATED;

                                spin_unlock(&ift->lock);
                                if (!(flags&IFA_F_TENTATIVE))
                                        ipv6_ifa_notify(0, ift);
                        }

                        if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
                                /*
                                 * When a new public address is created as
                                 * described in [ADDRCONF], also create a new
                                 * temporary address. Also create a temporary
                                 * address if it's enabled but no temporary
                                 * address currently exists.
                                 */
                                read_unlock_bh(&in6_dev->lock);
                                ipv6_create_tempaddr(ifp, NULL);
                        } else {
                                read_unlock_bh(&in6_dev->lock);
                        }
#endif
                        in6_ifa_put(ifp);
                        addrconf_verify(0);
                }
        }
        inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
        in6_dev_put(in6_dev);
}
/*
 *      Set destination address.
 *      Special case for SIT interfaces where we create a new "virtual"
 *      device.
 *
 * Handles the SIOCSIFDSTADDR ioctl: for a SIT device, turns a
 * v4-compatible IPv6 destination into an SIOCADDTUNNEL on the tunnel
 * driver and brings the resulting device up.  Returns 0 or -errno.
 */
int addrconf_set_dstaddr(struct net *net, void __user *arg)
{
        struct in6_ifreq ireq;
        struct net_device *dev;
        int err = -EINVAL;

        rtnl_lock();

        err = -EFAULT;
        if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
                goto err_exit;

        dev = __dev_get_by_index(net, ireq.ifr6_ifindex);

        err = -ENODEV;
        if (dev == NULL)
                goto err_exit;

#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
        if (dev->type == ARPHRD_SIT) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                err = -EADDRNOTAVAIL;
                /* Only ::a.b.c.d style destinations make sense for SIT. */
                if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
                        goto err_exit;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
                p.iph.saddr = 0;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPV6;
                p.iph.ttl = 64;
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        /* The ioctl expects a userspace pointer; point it at
                         * kernel memory for the duration of the call. */
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                } else
                        err = -EOPNOTSUPP;

                if (err == 0) {
                        err = -ENOBUFS;
                        /* The tunnel driver filled in the new device name. */
                        dev = __dev_get_by_name(net, p.name);
                        if (!dev)
                                goto err_exit;
                        err = dev_open(dev);
                }
        }
#endif

err_exit:
        rtnl_unlock();
        return err;
}
/*
 *      Manual configuration of address on an interface
 *
 * Adds @pfx/@plen on the device with index @ifindex, converting the
 * given lifetimes (in seconds) into IFA_F_* flags, route expiry and
 * per-address lifetime fields, then kicks off DAD.  Caller holds RTNL.
 * Returns 0 or -errno.
 */
static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
                          unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
                          __u32 valid_lft)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *idev;
        struct net_device *dev;
        int scope;
        u32 flags;
        clock_t expires;
        unsigned long timeout;

        ASSERT_RTNL();

        if (plen > 128)
                return -EINVAL;

        /* check the lifetime */
        if (!valid_lft || prefered_lft > valid_lft)
                return -EINVAL;

        dev = __dev_get_by_index(net, ifindex);
        if (!dev)
                return -ENODEV;

        idev = addrconf_add_dev(dev);
        if (IS_ERR(idev))
                return PTR_ERR(idev);

        scope = ipv6_addr_scope(pfx);

        /* Finite valid lifetime -> expiring prefix route; otherwise the
         * address is permanent. */
        timeout = addrconf_timeout_fixup(valid_lft, HZ);
        if (addrconf_finite_timeout(timeout)) {
                expires = jiffies_to_clock_t(timeout * HZ);
                valid_lft = timeout;
                flags = RTF_EXPIRES;
        } else {
                expires = 0;
                flags = 0;
                ifa_flags |= IFA_F_PERMANENT;
        }

        /* A preferred lifetime of zero deprecates the address at once. */
        timeout = addrconf_timeout_fixup(prefered_lft, HZ);
        if (addrconf_finite_timeout(timeout)) {
                if (timeout == 0)
                        ifa_flags |= IFA_F_DEPRECATED;
                prefered_lft = timeout;
        }

        ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);

        if (!IS_ERR(ifp)) {
                spin_lock_bh(&ifp->lock);
                ifp->valid_lft = valid_lft;
                ifp->prefered_lft = prefered_lft;
                ifp->tstamp = jiffies;
                spin_unlock_bh(&ifp->lock);

                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
                                      expires, flags);
                /*
                 * Note that section 3.1 of RFC 4429 indicates
                 * that the Optimistic flag should not be set for
                 * manually configured addresses
                 */
                addrconf_dad_start(ifp, 0);
                in6_ifa_put(ifp);
                addrconf_verify(0);
                return 0;
        }

        return PTR_ERR(ifp);
}
/*
 * Delete the manually configured address @pfx/@plen from the device with
 * index @ifindex.  If this removes the last address, IPv6 is disabled on
 * the interface.  Returns 0 or -errno.
 */
static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
                          unsigned int plen)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *idev;
        struct net_device *dev;

        if (plen > 128)
                return -EINVAL;

        dev = __dev_get_by_index(net, ifindex);
        if (!dev)
                return -ENODEV;

        if ((idev = __in6_dev_get(dev)) == NULL)
                return -ENXIO;

        read_lock_bh(&idev->lock);
        list_for_each_entry(ifp, &idev->addr_list, if_list) {
                if (ifp->prefix_len == plen &&
                    ipv6_addr_equal(pfx, &ifp->addr)) {
                        /* Hold a ref across the unlock; ipv6_del_addr()
                         * consumes it. */
                        in6_ifa_hold(ifp);
                        read_unlock_bh(&idev->lock);

                        ipv6_del_addr(ifp);

                        /* If the last address is deleted administratively,
                           disable IPv6 on this interface.
                         */
                        if (list_empty(&idev->addr_list))
                                addrconf_ifdown(idev->dev, 1);
                        return 0;
                }
        }
        read_unlock_bh(&idev->lock);
        return -EADDRNOTAVAIL;
}
/*
 * SIOCSIFADDR handler: add a permanent address with infinite lifetimes
 * from a userspace in6_ifreq.  Requires CAP_NET_ADMIN.
 */
int addrconf_add_ifaddr(struct net *net, void __user *arg)
{
        struct in6_ifreq ireq;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
                return -EFAULT;

        rtnl_lock();
        err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
                             ireq.ifr6_prefixlen, IFA_F_PERMANENT,
                             INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        rtnl_unlock();
        return err;
}
/*
 * SIOCDIFADDR handler: delete an address described by a userspace
 * in6_ifreq.  Requires CAP_NET_ADMIN.
 */
int addrconf_del_ifaddr(struct net *net, void __user *arg)
{
        struct in6_ifreq ireq;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
                return -EFAULT;

        rtnl_lock();
        err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
                             ireq.ifr6_prefixlen);
        rtnl_unlock();
        return err;
}
/*
 * Add a permanent, immediately-usable (no DAD) address to @idev and
 * announce it via netlink.  Errors from ipv6_add_addr() are ignored.
 */
static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
                     int plen, int scope)
{
        struct inet6_ifaddr *ifp;

        ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
        if (IS_ERR(ifp))
                return;

        spin_lock_bh(&ifp->lock);
        ifp->flags &= ~IFA_F_TENTATIVE;
        spin_unlock_bh(&ifp->lock);

        ipv6_ifa_notify(RTM_NEWADDR, ifp);
        in6_ifa_put(ifp);
}
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
/*
 * Mirror the host's IPv4 addresses onto a SIT device as v4-compatible
 * (or fe80::a.b.c.d for PtP) IPv6 addresses.  If the device itself has
 * an IPv4 address, only that one is used; otherwise every IPv4 address
 * of every up interface is added.  Caller holds RTNL.
 */
static void sit_add_v4_addrs(struct inet6_dev *idev)
{
        struct in6_addr addr;
        struct net_device *dev;
        struct net *net = dev_net(idev->dev);
        int scope;

        ASSERT_RTNL();

        memset(&addr, 0, sizeof(struct in6_addr));
        memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);

        if (idev->dev->flags&IFF_POINTOPOINT) {
                /* PtP tunnels get a link-local fe80::v4 address. */
                addr.s6_addr32[0] = htonl(0xfe800000);
                scope = IFA_LINK;
        } else {
                scope = IPV6_ADDR_COMPATv4;
        }

        if (addr.s6_addr32[3]) {
                add_addr(idev, &addr, 128, scope);
                return;
        }

        for_each_netdev(net, dev) {
                struct in_device * in_dev = __in_dev_get_rtnl(dev);
                if (in_dev && (dev->flags & IFF_UP)) {
                        struct in_ifaddr * ifa;

                        int flag = scope;

                        for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
                                int plen;

                                addr.s6_addr32[3] = ifa->ifa_local;

                                if (ifa->ifa_scope == RT_SCOPE_LINK)
                                        continue;
                                if (ifa->ifa_scope >= RT_SCOPE_HOST) {
                                        if (idev->dev->flags&IFF_POINTOPOINT)
                                                continue;
                                        flag |= IFA_HOST;
                                }
                                if (idev->dev->flags&IFF_POINTOPOINT)
                                        plen = 64;
                                else
                                        plen = 96;

                                add_addr(idev, &addr, plen, flag);
                        }
                }
        }
}
#endif
/*
 * Configure the loopback device: add ::1 and (re)install host routes
 * for the addresses of all other interfaces, replacing dsts that were
 * obsoleted while lo was down.  Caller holds RTNL.
 */
static void init_loopback(struct net_device *dev)
{
        struct inet6_dev  *idev;
        struct net_device *sp_dev;
        struct inet6_ifaddr *sp_ifa;
        struct rt6_info *sp_rt;

        /* ::1 */

        ASSERT_RTNL();

        if ((idev = ipv6_find_idev(dev)) == NULL) {
                printk(KERN_DEBUG "init loopback: add_dev failed\n");
                return;
        }

        add_addr(idev, &in6addr_loopback, 128, IFA_HOST);

        /* Add routes to other interface's IPv6 addresses */
        for_each_netdev(dev_net(dev), sp_dev) {
                if (!strcmp(sp_dev->name, dev->name))
                        continue;

                idev = __in6_dev_get(sp_dev);
                if (!idev)
                        continue;

                read_lock_bh(&idev->lock);
                list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {

                        /* Skip addresses that failed or have not finished DAD. */
                        if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
                                continue;

                        if (sp_ifa->rt) {
                                /* This dst has been added to garbage list when
                                 * lo device down, release this obsolete dst and
                                 * reallocate a new router for ifa.
                                 */
                                if (sp_ifa->rt->dst.obsolete > 0) {
                                        dst_release(&sp_ifa->rt->dst);
                                        sp_ifa->rt = NULL;
                                } else {
                                        continue;
                                }
                        }

                        sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);

                        /* Failure cases are ignored */
                        if (!IS_ERR(sp_rt)) {
                                sp_ifa->rt = sp_rt;
                                ip6_ins_rt(sp_rt);
                        }
                }
                read_unlock_bh(&idev->lock);
        }
}
/*
 * Add the link-local address @addr (a /64) to @idev, install its prefix
 * route and start DAD.  Marks the address optimistic when optimistic
 * DAD is enabled and forwarding is off.
 */
static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
{
        u32 addr_flags = IFA_F_PERMANENT;
        struct inet6_ifaddr *ifp;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        if (idev->cnf.optimistic_dad &&
            !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
                addr_flags |= IFA_F_OPTIMISTIC;
#endif

        ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
        if (IS_ERR(ifp))
                return;

        addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
        addrconf_dad_start(ifp, 0);
        in6_ifa_put(ifp);
}
/*
 * Autoconfigure a regular (Ethernet-like) device: derive an EUI-64
 * based fe80:: link-local address and add it.  Only hardware types with
 * a known identifier scheme are handled.  Caller holds RTNL.
 */
static void addrconf_dev_config(struct net_device *dev)
{
        struct inet6_dev *idev;
        struct in6_addr addr;

        ASSERT_RTNL();

        switch (dev->type) {
        case ARPHRD_ETHER:
        case ARPHRD_FDDI:
        case ARPHRD_IEEE802_TR:
        case ARPHRD_ARCNET:
        case ARPHRD_RAWIP:
        case ARPHRD_INFINIBAND:
                break;
        default:
                /* Alas, we support only Ethernet autoconfiguration. */
                return;
        }

        idev = addrconf_add_dev(dev);
        if (IS_ERR(idev))
                return;

        memset(&addr, 0, sizeof(struct in6_addr));
        addr.s6_addr32[0] = htonl(0xFE800000);

        if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
                addrconf_add_linklocal(idev, &addr);
}
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
/*
 * Autoconfigure a SIT (IPv6-in-IPv4) tunnel device.  ISATAP tunnels get
 * an EUI-64 link-local address; plain tunnels mirror the host's IPv4
 * addresses and get either PtP routes or the ::/96 compat route.
 * Caller holds RTNL.
 */
static void addrconf_sit_config(struct net_device *dev)
{
        struct inet6_dev *idev;

        ASSERT_RTNL();

        /*
         * Configure the tunnel with one of our IPv4
         * addresses... we should configure all of
         * our v4 addrs in the tunnel
         */

        if ((idev = ipv6_find_idev(dev)) == NULL) {
                printk(KERN_DEBUG "init sit: add_dev failed\n");
                return;
        }

        if (dev->priv_flags & IFF_ISATAP) {
                struct in6_addr addr;

                ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
                addrconf_prefix_route(&addr, 64, dev, 0, 0);
                if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
                        addrconf_add_linklocal(idev, &addr);
                return;
        }

        sit_add_v4_addrs(idev);

        if (dev->flags&IFF_POINTOPOINT) {
                addrconf_add_mroute(dev);
                addrconf_add_lroute(dev);
        } else
                sit_route_add(dev);
}
#endif
#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
/*
 * Autoconfigure a GRE tunnel device: install the fe80::/64 prefix route
 * and, when an EUI-64 can be derived, a link-local address.
 * Caller holds RTNL.
 */
static void addrconf_gre_config(struct net_device *dev)
{
        struct in6_addr addr;
        struct inet6_dev *idev;

        pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name);

        ASSERT_RTNL();

        idev = ipv6_find_idev(dev);
        if (idev == NULL) {
                printk(KERN_DEBUG "init gre: add_dev failed\n");
                return;
        }

        ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
        addrconf_prefix_route(&addr, 64, dev, 0, 0);

        if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
                addrconf_add_linklocal(idev, &addr);
}
#endif
/*
 * Copy @link_dev's non-tentative link-local address onto @idev.
 * Returns 0 on success, -1 when @link_dev has no usable link-local
 * address.
 */
static inline int
ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
{
        struct in6_addr lladdr;

        if (ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE))
                return -1;

        addrconf_add_linklocal(idev, &lladdr);
        return 0;
}
/*
 * Give an ip6 tunnel device a link-local address: first try to inherit
 * one from the underlying link device, then from any device in the
 * namespace.
 */
static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
{
        struct net *net = dev_net(idev->dev);
        struct net_device *link_dev;

        /* first try to inherit the link-local address from the link device */
        if (idev->dev->iflink) {
                link_dev = __dev_get_by_index(net, idev->dev->iflink);
                if (link_dev && !ipv6_inherit_linklocal(idev, link_dev))
                        return;
        }

        /* then try to inherit it from any device */
        for_each_netdev(net, link_dev) {
                if (!ipv6_inherit_linklocal(idev, link_dev))
                        return;
        }
        printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n");
}
/*
 * Autoconfigure tunnel with a link-local address so routing protocols,
 * DHCPv6, MLD etc. can be run over the virtual link
 */

static void addrconf_ip6_tnl_config(struct net_device *dev)
{
        struct inet6_dev *idev;

        ASSERT_RTNL();

        idev = addrconf_add_dev(dev);
        if (IS_ERR(idev)) {
                printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
                return;
        }
        ip6_tnl_add_linklocal(idev);
}
/*
 * netdevice notifier: reacts to device lifecycle events by creating or
 * tearing down IPv6 state.  Note the deliberate fallthrough from
 * NETDEV_CHANGEMTU into NETDEV_DOWN when the new MTU is below
 * IPV6_MIN_MTU.
 */
static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void * data)
{
        struct net_device *dev = (struct net_device *) data;
        struct inet6_dev *idev = __in6_dev_get(dev);
        int run_pending = 0;
        int err;

        switch (event) {
        case NETDEV_REGISTER:
                if (!idev && dev->mtu >= IPV6_MIN_MTU) {
                        idev = ipv6_add_dev(dev);
                        if (!idev)
                                return notifier_from_errno(-ENOMEM);
                }
                break;

        case NETDEV_UP:
        case NETDEV_CHANGE:
                if (dev->flags & IFF_SLAVE)
                        break;

                if (event == NETDEV_UP) {
                        if (!addrconf_qdisc_ok(dev)) {
                                /* device is not ready yet. */
                                printk(KERN_INFO
                                        "ADDRCONF(NETDEV_UP): %s: "
                                        "link is not ready\n",
                                        dev->name);
                                break;
                        }

                        if (!idev && dev->mtu >= IPV6_MIN_MTU)
                                idev = ipv6_add_dev(dev);

                        if (idev) {
                                idev->if_flags |= IF_READY;
                                run_pending = 1;
                        }
                } else {
                        if (!addrconf_qdisc_ok(dev)) {
                                /* device is still not ready. */
                                break;
                        }

                        if (idev) {
                                if (idev->if_flags & IF_READY)
                                        /* device is already configured. */
                                        break;
                                idev->if_flags |= IF_READY;
                        }

                        printk(KERN_INFO
                                        "ADDRCONF(NETDEV_CHANGE): %s: "
                                        "link becomes ready\n",
                                        dev->name);

                        run_pending = 1;
                }

                /* Device-type specific autoconfiguration. */
                switch (dev->type) {
#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
                case ARPHRD_SIT:
                        addrconf_sit_config(dev);
                        break;
#endif
#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
                case ARPHRD_IPGRE:
                        addrconf_gre_config(dev);
                        break;
#endif
                case ARPHRD_TUNNEL6:
                        addrconf_ip6_tnl_config(dev);
                        break;
                case ARPHRD_LOOPBACK:
                        init_loopback(dev);
                        break;

                default:
                        addrconf_dev_config(dev);
                        break;
                }

                if (idev) {
                        if (run_pending)
                                addrconf_dad_run(idev);

                        /*
                         * If the MTU changed during the interface down,
                         * when the interface up, the changed MTU must be
                         * reflected in the idev as well as routers.
                         */
                        if (idev->cnf.mtu6 != dev->mtu &&
                            dev->mtu >= IPV6_MIN_MTU) {
                                rt6_mtu_change(dev, dev->mtu);
                                idev->cnf.mtu6 = dev->mtu;
                        }
                        idev->tstamp = jiffies;
                        inet6_ifinfo_notify(RTM_NEWLINK, idev);

                        /*
                         * If the changed mtu during down is lower than
                         * IPV6_MIN_MTU stop IPv6 on this interface.
                         */
                        if (dev->mtu < IPV6_MIN_MTU)
                                addrconf_ifdown(dev, 1);
                }
                break;

        case NETDEV_CHANGEMTU:
                if (idev && dev->mtu >= IPV6_MIN_MTU) {
                        rt6_mtu_change(dev, dev->mtu);
                        idev->cnf.mtu6 = dev->mtu;
                        break;
                }

                if (!idev && dev->mtu >= IPV6_MIN_MTU) {
                        idev = ipv6_add_dev(dev);
                        if (idev)
                                break;
                }

                /*
                 * MTU falled under IPV6_MIN_MTU.
                 * Stop IPv6 on this interface.
                 */
                /* fallthrough */

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                /*
                 *      Remove all addresses from this interface.
                 */
                addrconf_ifdown(dev, event != NETDEV_DOWN);
                break;

        case NETDEV_CHANGENAME:
                if (idev) {
                        snmp6_unregister_dev(idev);
                        addrconf_sysctl_unregister(idev);
                        addrconf_sysctl_register(idev);
                        err = snmp6_register_dev(idev);
                        if (err)
                                return notifier_from_errno(err);
                }
                break;

        case NETDEV_PRE_TYPE_CHANGE:
        case NETDEV_POST_TYPE_CHANGE:
                addrconf_type_change(dev, event);
                break;
        }

        return NOTIFY_OK;
}
/*
 * addrconf module should be notified of a device going up
 */
static struct notifier_block ipv6_dev_notf = {
        .notifier_call = addrconf_notify,   /* handles all NETDEV_* events */
};
/*
 * React to a device hardware-type change: unmap multicast state before
 * the change and remap it afterwards.  Caller holds RTNL.
 */
static void addrconf_type_change(struct net_device *dev, unsigned long event)
{
        struct inet6_dev *idev;

        ASSERT_RTNL();

        idev = __in6_dev_get(dev);

        switch (event) {
        case NETDEV_POST_TYPE_CHANGE:
                ipv6_mc_remap(idev);
                break;
        case NETDEV_PRE_TYPE_CHANGE:
                ipv6_mc_unmap(idev);
                break;
        }
}
/*
 * Tear down IPv6 state on @dev.  @how != 0 means the device is going
 * away (unregister/disable): detach the inet6_dev and destroy
 * everything; @how == 0 only clears addresses and readiness flags so
 * the device can come back up.  Caller holds RTNL.  Returns 0 or
 * -ENODEV.
 */
static int addrconf_ifdown(struct net_device *dev, int how)
{
        struct net *net = dev_net(dev);
        struct inet6_dev *idev;
        struct inet6_ifaddr *ifa;
        int state, i;

        ASSERT_RTNL();

        rt6_ifdown(net, dev);
        neigh_ifdown(&nd_tbl, dev);

        idev = __in6_dev_get(dev);
        if (idev == NULL)
                return -ENODEV;

        /*
         * Step 1: remove reference to ipv6 device from parent device.
         *         Do not dev_put!
         */
        if (how) {
                idev->dead = 1;

                /* protected by rtnl_lock */
                RCU_INIT_POINTER(dev->ip6_ptr, NULL);

                /* Step 1.5: remove snmp6 entry */
                snmp6_unregister_dev(idev);

        }

        /* Step 2: clear hash table */
        spin_lock_bh(&addrconf_hash_lock);
        for (i = 0; i < IN6_ADDR_HSIZE; i++) {
                struct hlist_head *h = &inet6_addr_lst[i];
                struct hlist_node *n;

                /* Deleting entries invalidates the RCU iteration, hence
                 * the restart after each removal. */
restart:
                hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
                        if (ifa->idev == idev) {
                                hlist_del_init_rcu(&ifa->addr_lst);
                                addrconf_del_timer(ifa);
                                goto restart;
                        }
                }
        }

        write_lock_bh(&idev->lock);

        /* Step 2: clear flags for stateless addrconf */
        if (!how)
                idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);

#ifdef CONFIG_IPV6_PRIVACY
        if (how && del_timer(&idev->regen_timer))
                in6_dev_put(idev);

        /* Step 3: clear tempaddr list */
        while (!list_empty(&idev->tempaddr_list)) {
                ifa = list_first_entry(&idev->tempaddr_list,
                                       struct inet6_ifaddr, tmp_list);
                list_del(&ifa->tmp_list);
                /* idev->lock must be dropped while touching ifa->lock. */
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);

                if (ifa->ifpub) {
                        in6_ifa_put(ifa->ifpub);
                        ifa->ifpub = NULL;
                }
                spin_unlock_bh(&ifa->lock);
                in6_ifa_put(ifa);
                write_lock_bh(&idev->lock);
        }
#endif

        while (!list_empty(&idev->addr_list)) {
                ifa = list_first_entry(&idev->addr_list,
                                       struct inet6_ifaddr, if_list);
                addrconf_del_timer(ifa);

                list_del(&ifa->if_list);

                write_unlock_bh(&idev->lock);

                /* Only notify for addresses not already marked dead. */
                spin_lock_bh(&ifa->state_lock);
                state = ifa->state;
                ifa->state = INET6_IFADDR_STATE_DEAD;
                spin_unlock_bh(&ifa->state_lock);

                if (state != INET6_IFADDR_STATE_DEAD) {
                        __ipv6_ifa_notify(RTM_DELADDR, ifa);
                        atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
                }
                in6_ifa_put(ifa);

                write_lock_bh(&idev->lock);
        }

        write_unlock_bh(&idev->lock);
        spin_unlock_bh(&addrconf_hash_lock);

        /* Step 5: Discard anycast and multicast list */
        if (how) {
                ipv6_ac_destroy_dev(idev);
                ipv6_mc_destroy_dev(idev);
        } else {
                ipv6_mc_down(idev);
        }

        idev->tstamp = jiffies;

        /* Last: Shot the device (if unregistered) */
        if (how) {
                addrconf_sysctl_unregister(idev);
                neigh_parms_release(&nd_tbl, idev->nd_parms);
                neigh_ifdown(&nd_tbl, dev);
                in6_dev_put(idev);
        }
        return 0;
}
/*
 * Router Solicitation timer: resend an RS (up to rtr_solicits times)
 * until a Router Advertisement arrives.  @data is the soliciting
 * inet6_ifaddr, whose reference is dropped on exit.
 */
static void addrconf_rs_timer(unsigned long data)
{
        struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
        struct inet6_dev *idev = ifp->idev;

        read_lock(&idev->lock);
        if (idev->dead || !(idev->if_flags & IF_READY))
                goto out;

        /* Routers do not solicit routers. */
        if (idev->cnf.forwarding)
                goto out;

        /* Announcement received after solicitation was sent */
        if (idev->if_flags & IF_RA_RCVD){
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
                printk(KERN_DEBUG "[LGE_DATA][%s()] The RA msg had been received!", __func__);
#endif
                goto out;
        }

        spin_lock(&ifp->lock);
        if (ifp->probes++ < idev->cnf.rtr_solicits) {
                /* The wait after the last probe can be shorter */
                addrconf_mod_timer(ifp, AC_RS,
                                   (ifp->probes == idev->cnf.rtr_solicits) ?
                                   idev->cnf.rtr_solicit_delay :
                                   idev->cnf.rtr_solicit_interval);
                spin_unlock(&ifp->lock);

#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
                printk(KERN_DEBUG "[LGE_DATA][%s()][stage 2] rs is sent now!", __func__);
#endif
                ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
        } else {
                spin_unlock(&ifp->lock);
                /*
                 * Note: we do not support deprecated "all on-link"
                 * assumption any longer.
                 */
                printk(KERN_DEBUG "%s: no IPv6 routers present\n",
                       idev->dev->name);
        }

out:
        read_unlock(&idev->lock);
        in6_ifa_put(ifp);
}
/*
 *      Duplicate Address Detection
 *
 * Arm the DAD timer for @ifp.  Optimistic addresses start immediately;
 * all others wait a random delay of up to rtr_solicit_delay jiffies.
 */
static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
{
        struct inet6_dev *idev = ifp->idev;
        unsigned long rand_num = 0;

        if (!(ifp->flags & IFA_F_OPTIMISTIC))
                rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1);

        ifp->probes = idev->cnf.dad_transmits;
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
        printk(KERN_DEBUG "[LGE_DATA][%s()] dad_transmits == %d, ramd_num == %lu", __func__, idev->cnf.dad_transmits, rand_num);
#endif
        addrconf_mod_timer(ifp, AC_DAD, rand_num);
}
/*
 * Begin Duplicate Address Detection (RFC 4862 section 5.4) for a newly
 * added address: join the solicited-node multicast group, then either
 * complete immediately (NOARP/loopback devices, accept_dad disabled,
 * non-tentative or NODAD addresses — and, in the LGE vendor variant,
 * global-scope addresses on rmnet interfaces) or arm the DAD timer.
 *
 * Fix vs. original: removed the dead `CurrentInterfaceName == NULL`
 * check — CurrentInterfaceName is a local array, so its address can
 * never compare equal to NULL; the branch (and its early return) was
 * unreachable and only masked the real control flow.
 */
static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
{
        struct inet6_dev *idev = ifp->idev;
        struct net_device *dev = idev->dev;

#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
        int ipv6AddrType = 0; /* initializing */
        const char InterfaceNameToApply[6] = "rmnet";
        char CurrentInterfaceName[6] = {0}; /* initializing */

        ipv6AddrType = ipv6_addr_type(&ifp->addr);
        printk(KERN_DEBUG "[LGE_DATA][%s()] dad_start! dev_name == %s", __func__, dev->name);
        printk(KERN_DEBUG "[LGE_DATA][%s()] ipv6_addr_type == %d", __func__, ipv6AddrType);
        /* Copy at most 5 chars; index 5 stays NUL from the initializer. */
        strncpy(CurrentInterfaceName, dev->name, 5);
#endif

        addrconf_join_solict(dev, &ifp->addr);

        net_srandom(ifp->addr.s6_addr32[3]);

        read_lock_bh(&idev->lock);
        spin_lock(&ifp->lock);
        if (ifp->state == INET6_IFADDR_STATE_DEAD)
                goto out;

#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
        if (((strcmp(InterfaceNameToApply, CurrentInterfaceName) == 0) && (ipv6AddrType == LGE_DATA_GLOBAL_SCOPE))
            || (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
                idev->cnf.accept_dad < 1 ||
                !(ifp->flags&IFA_F_TENTATIVE) ||
                ifp->flags & IFA_F_NODAD))
#else
        /* Kernel original implementation */
        if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
            idev->cnf.accept_dad < 1 ||
            !(ifp->flags&IFA_F_TENTATIVE) ||
            ifp->flags & IFA_F_NODAD)
#endif
        {
                /* DAD not needed: mark the address usable right away. */
                ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
                spin_unlock(&ifp->lock);
                read_unlock_bh(&idev->lock);

#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
                printk(KERN_DEBUG "[LGE_DATA][%s()] ipv6_addr_type == %d, Because the IPv6 type is Global Scope, we will immediately finish the DAD process for Global Scope.", __func__, ipv6AddrType);
#endif
                addrconf_dad_completed(ifp);
                return;
        }

        if (!(idev->if_flags & IF_READY)) {
                spin_unlock(&ifp->lock);
                read_unlock_bh(&idev->lock);
                /*
                 * If the device is not ready:
                 * - keep it tentative if it is a permanent address.
                 * - otherwise, kill it.
                 */
                in6_ifa_hold(ifp);
                addrconf_dad_stop(ifp, 0);
                return;
        }

        /*
         * Optimistic nodes can start receiving
         * Frames right away
         */
        if (ifp->flags & IFA_F_OPTIMISTIC) {
                ip6_ins_rt(ifp->rt);
                if (ipv6_use_optimistic_addr(idev)) {
                        /* Because optimistic nodes can use this address,
                         * notify listeners. If DAD fails, RTM_DELADDR is sent.
                         */
                        ipv6_ifa_notify(RTM_NEWADDR, ifp);
                }
        }

        addrconf_dad_kick(ifp);
out:
        spin_unlock(&ifp->lock);
        read_unlock_bh(&idev->lock);
}
/*
 * addrconf_dad_timer - DAD retransmit timer for a tentative address.
 *
 * @data: the struct inet6_ifaddr being probed (cast from timer data).
 *
 * If all probes have been sent (ifp->probes == 0) the address passed
 * duplicate address detection and is promoted via addrconf_dad_completed();
 * otherwise one more neighbour solicitation is sent and the timer is
 * re-armed.  The reference the timer holds on @ifp is dropped at "out".
 */
static void addrconf_dad_timer(unsigned long data)
{
	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
	struct inet6_dev *idev = ifp->idev;
	struct in6_addr mcaddr;
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
	struct net_device *dev = idev->dev;
	const char InterfaceNameToApply[6] = "rmnet";
	char CurrentInterfaceName[6] = {0};	/* zero-filled: the 5-byte copy below stays NUL-terminated */
#endif

	if (!ifp->probes && addrconf_dad_end(ifp))
		goto out;

	read_lock(&idev->lock);
	if (idev->dead || !(idev->if_flags & IF_READY)) {
		read_unlock(&idev->lock);
		goto out;
	}
	spin_lock(&ifp->lock);
	if (ifp->state == INET6_IFADDR_STATE_DEAD) {
		spin_unlock(&ifp->lock);
		read_unlock(&idev->lock);
		goto out;
	}

	if (ifp->probes == 0) {
		/*
		 * DAD was successful
		 */
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
		printk(KERN_DEBUG "[LGE_DATA][%s()] DAD was successful!", __func__);
#endif
		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
		spin_unlock(&ifp->lock);
		read_unlock(&idev->lock);
		addrconf_dad_completed(ifp);
		goto out;
	}

	ifp->probes--;
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
	printk(KERN_DEBUG "[LGE_DATA][%s()], ifp->idev->nd_parms->retrans_time == %d", __func__, ifp->idev->nd_parms->retrans_time);
	printk(KERN_DEBUG "[LGE_DATA][%s()] dev_name == %s", __func__, dev->name);
	strncpy(CurrentInterfaceName, dev->name, 5);
	/*
	 * Fix: the original tested "CurrentInterfaceName == NULL", which
	 * compares the address of an on-stack array and can never be true
	 * (dead code, flagged by -Waddress).  Check for an empty copied
	 * interface name instead, which is what the message suggests was
	 * intended.
	 */
	if (CurrentInterfaceName[0] == '\0') {
		spin_unlock(&ifp->lock);
		read_unlock(&idev->lock);
		printk(KERN_DEBUG "[LGE_DATA] CurrentInterfaceName is NULL !\n");
		goto out;
	}
	printk(KERN_DEBUG "[LGE_DATA][%s()] CopyInterfaceName == %s, CurrentInterfaceName == %s", __func__, InterfaceNameToApply, CurrentInterfaceName);
	if (strcmp(InterfaceNameToApply, CurrentInterfaceName) == 0) {
		/* rmnet only: vendor-tuned DAD retransmit interval, so Wi-Fi etc. are unaffected. */
		addrconf_mod_timer(ifp, AC_DAD, LGE_DATA_WAITING_TIME_FOR_DAD_OF_LGU);
		printk(KERN_DEBUG "[LGE_DATA][%s()] The waiting time for link-local DAD is set as [%d] milli-seconds in case of only rmnet interface !", __func__, LGE_DATA_WAITING_TIME_FOR_DAD_OF_LGU*10);
	} else {
		/* kernel original behaviour for every other interface */
		addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
	}
#else
	addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
#endif
	spin_unlock(&ifp->lock);
	read_unlock(&idev->lock);

	/* send a neighbour solicitation for our addr */
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
	printk(KERN_DEBUG "[LGE_DATA][%s()] send a neighbour solicitation for our addr !", __func__);
#endif
	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
	ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
out:
	in6_ifa_put(ifp);
}
/*
 * addrconf_dad_completed - promote @ifp after successful DAD.
 *
 * Announces the now-valid address via RTM_NEWADDR.  For a link-local
 * address on an interface that accepts router advertisements (and is
 * not loopback), it also sends the first router solicitation and arms
 * the AC_RS timer.
 */
static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
{
struct net_device *dev = ifp->idev->dev;
/*
 * Configure the address for reception. Now it is valid.
 */
ipv6_ifa_notify(RTM_NEWADDR, ifp);
//
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
printk(KERN_DEBUG "[LGE_DATA][%s()] dad_is_completed!", __func__);
#endif
//
/* If added prefix is link local and we are prepared to process
router advertisements, start sending router solicitations.
*/
/* accept_ra == 1 means "accept unless forwarding"; == 2 means always accept */
if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
ifp->idev->cnf.accept_ra == 2) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
/*
 * If a host as already performed a random delay
 * [...] as part of DAD [...] there is no need
 * to delay again before sending the first RS
 */
//
#ifdef CONFIG_LGP_DATA_TCPIP_SLAAC_IPV6_ALLOCATION_BOOSTER
printk(KERN_DEBUG "[LGE_DATA][%s()][stage 1] rs is sent now!", __func__);
#endif
//
ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
/* one RS already sent; remaining solicitations are driven by the AC_RS timer */
spin_lock_bh(&ifp->lock);
ifp->probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval);
spin_unlock_bh(&ifp->lock);
}
}
/*
 * addrconf_dad_run - kick off DAD for every address on @idev that is
 * tentative and sitting in the DAD state.
 */
static void addrconf_dad_run(struct inet6_dev *idev)
{
	struct inet6_ifaddr *ifa;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		spin_lock(&ifa->lock);
		if ((ifa->flags & IFA_F_TENTATIVE) &&
		    ifa->state == INET6_IFADDR_STATE_DAD)
			addrconf_dad_kick(ifa);
		spin_unlock(&ifa->lock);
	}
	read_unlock_bh(&idev->lock);
}
#ifdef CONFIG_PROC_FS
/*
 * Iterator cursor for /proc/net/if_inet6: current hash bucket and the
 * number of already-emitted entries within that bucket.
 */
struct if6_iter_state {
struct seq_net_private p;
int bucket;	/* index into inet6_addr_lst[] */
int offset;	/* entries already consumed in this bucket */
};
/*
 * if6_get_first - find the seq_file entry at position @pos.
 *
 * Walks the global address hash under RCU (caller holds rcu_read_lock_bh),
 * skipping addresses from other network namespaces, and resumes from the
 * bucket/offset cursor saved in the iterator state.  Returns the matching
 * inet6_ifaddr or NULL when exhausted.
 */
static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
{
struct inet6_ifaddr *ifa = NULL;
struct if6_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
int p = 0;
/* initial bucket if pos is 0 */
if (pos == 0) {
state->bucket = 0;
state->offset = 0;
}
for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
struct hlist_node *n;
hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
/* sync with offset */
if (p < state->offset) {
p++;
continue;
}
state->offset++;
return ifa;
}
/* prepare for next bucket */
state->offset = 0;
p = 0;
}
return NULL;
}
/*
 * if6_get_next - advance the iterator to the entry after @ifa.
 *
 * First continues within the current hash chain, then moves on to
 * subsequent buckets.  Same namespace filtering and cursor bookkeeping
 * as if6_get_first().  Returns NULL at the end of the table.
 */
static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
struct inet6_ifaddr *ifa)
{
struct if6_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
struct hlist_node *n = &ifa->addr_lst;
hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
state->offset++;
return ifa;
}
/* current chain exhausted: scan the remaining buckets */
while (++state->bucket < IN6_ADDR_HSIZE) {
state->offset = 0;
hlist_for_each_entry_rcu_bh(ifa, n,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
state->offset++;
return ifa;
}
}
return NULL;
}
/* seq_file .start: take rcu_read_lock_bh for the whole traversal. */
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu_bh)
{
rcu_read_lock_bh();
return if6_get_first(seq, *pos);
}
/* seq_file .next: step the iterator and advance *pos. */
static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct inet6_ifaddr *ifa;
ifa = if6_get_next(seq, v);
++*pos;
return ifa;
}
/* seq_file .stop: drop the lock taken in if6_seq_start(). */
static void if6_seq_stop(struct seq_file *seq, void *v)
__releases(rcu_bh)
{
rcu_read_unlock_bh();
}
/*
 * seq_file .show: one /proc/net/if_inet6 line per address —
 * address, ifindex, prefix length, scope, flags, device name.
 */
static int if6_seq_show(struct seq_file *seq, void *v)
{
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
&ifp->addr,
ifp->idev->dev->ifindex,
ifp->prefix_len,
ifp->scope,
ifp->flags,
ifp->idev->dev->name);
return 0;
}
/* seq_file operation table for /proc/net/if_inet6. */
static const struct seq_operations if6_seq_ops = {
.start = if6_seq_start,
.next = if6_seq_next,
.show = if6_seq_show,
.stop = if6_seq_stop,
};
/* open handler: per-net seq_file with an if6_iter_state cursor. */
static int if6_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &if6_seq_ops,
sizeof(struct if6_iter_state));
}
/* file operations for /proc/net/if_inet6. */
static const struct file_operations if6_fops = {
.owner = THIS_MODULE,
.open = if6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
/* Per-namespace setup: create /proc/net/if_inet6. */
static int __net_init if6_proc_net_init(struct net *net)
{
if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
return -ENOMEM;
return 0;
}
/* Per-namespace teardown: remove /proc/net/if_inet6. */
static void __net_exit if6_proc_net_exit(struct net *net)
{
proc_net_remove(net, "if_inet6");
}
static struct pernet_operations if6_proc_net_ops = {
.init = if6_proc_net_init,
.exit = if6_proc_net_exit,
};
/* Register the per-net proc entry; called from addrconf init. */
int __init if6_proc_init(void)
{
return register_pernet_subsys(&if6_proc_net_ops);
}
void if6_proc_exit(void)
{
unregister_pernet_subsys(&if6_proc_net_ops);
}
#endif /* CONFIG_PROC_FS */
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
/*
 * ipv6_chk_home_addr - check whether @addr is configured as a Mobile
 * IPv6 home address (IFA_F_HOMEADDRESS) on any interface in @net.
 * Returns 1 if found, 0 otherwise.  Safe in softirq context (rcu_bh).
 */
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
{
int ret = 0;
struct inet6_ifaddr *ifp = NULL;
struct hlist_node *n;
unsigned int hash = ipv6_addr_hash(addr);
rcu_read_lock_bh();
hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
(ifp->flags & IFA_F_HOMEADDRESS)) {
ret = 1;
break;
}
}
rcu_read_unlock_bh();
return ret;
}
#endif
/*
 * addrconf_verify - periodic address lifetime verification.
 *
 * Walks every hash bucket of the global address table and, per address:
 *  - deletes it once valid_lft has expired,
 *  - marks it IFA_F_DEPRECATED once prefered_lft has expired,
 *  - (privacy extensions) regenerates a temporary address shortly
 *    before its preferred lifetime runs out.
 * Finally re-arms addr_chk_timer for the earliest upcoming event,
 * rounded for timer batching.  Any modification that drops the ifp
 * lock restarts the current bucket scan ("goto restart").
 */
static void addrconf_verify(unsigned long foo)
{
unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
struct hlist_node *node;
int i;
rcu_read_lock_bh();
spin_lock(&addrconf_verify_lock);
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
del_timer(&addr_chk_timer);
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
hlist_for_each_entry_rcu_bh(ifp, node,
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
/* permanent addresses never expire */
if (ifp->flags & IFA_F_PERMANENT)
continue;
spin_lock(&ifp->lock);
/* We try to batch several events at once. */
age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (ifp->valid_lft != INFINITY_LIFE_TIME &&
age >= ifp->valid_lft) {
spin_unlock(&ifp->lock);
in6_ifa_hold(ifp);
ipv6_del_addr(ifp);
goto restart;
} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
spin_unlock(&ifp->lock);
continue;
} else if (age >= ifp->prefered_lft) {
/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
int deprecate = 0;
if (!(ifp->flags&IFA_F_DEPRECATED)) {
deprecate = 1;
ifp->flags |= IFA_F_DEPRECATED;
}
if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
next = ifp->tstamp + ifp->valid_lft * HZ;
spin_unlock(&ifp->lock);
if (deprecate) {
in6_ifa_hold(ifp);
ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
goto restart;
}
#ifdef CONFIG_IPV6_PRIVACY
} else if ((ifp->flags&IFA_F_TEMPORARY) &&
!(ifp->flags&IFA_F_TENTATIVE)) {
/* lead time needed to complete DAD on a replacement address */
unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
ifp->idev->cnf.dad_transmits *
ifp->idev->nd_parms->retrans_time / HZ;
if (age >= ifp->prefered_lft - regen_advance) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
next = ifp->tstamp + ifp->prefered_lft * HZ;
if (!ifp->regen_count && ifpub) {
ifp->regen_count++;
in6_ifa_hold(ifp);
in6_ifa_hold(ifpub);
spin_unlock(&ifp->lock);
spin_lock(&ifpub->lock);
ifpub->regen_count = 0;
spin_unlock(&ifpub->lock);
ipv6_create_tempaddr(ifpub, ifp);
in6_ifa_put(ifpub);
in6_ifa_put(ifp);
goto restart;
}
} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
spin_unlock(&ifp->lock);
#endif
} else {
/* ifp->prefered_lft <= ifp->valid_lft */
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
next = ifp->tstamp + ifp->prefered_lft * HZ;
spin_unlock(&ifp->lock);
}
}
}
/* re-arm the timer: prefer the rounded deadline when close enough */
next_sec = round_jiffies_up(next);
next_sched = next;
/* If rounded timeout is accurate enough, accept it. */
if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
next_sched = next_sec;
/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
now, next, next_sec, next_sched);
addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
spin_unlock(&addrconf_verify_lock);
rcu_read_unlock_bh();
}
/*
 * extract_addr - pick the address out of IFA_ADDRESS / IFA_LOCAL.
 *
 * IFA_LOCAL takes precedence; if both attributes are present they must
 * carry the same address, otherwise NULL is returned.
 */
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
{
	struct in6_addr *pfx = addr ? nla_data(addr) : NULL;

	if (!local)
		return pfx;
	if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
		return NULL;
	return nla_data(local);
}
/* Netlink attribute validation policy for RTM_*ADDR requests. */
static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
[IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
[IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
};
/*
 * RTM_DELADDR handler: parse the request and delete the given address
 * from the interface identified by ifa_index.
 */
static int
inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX+1];
struct in6_addr *pfx;
int err;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
if (err < 0)
return err;
ifm = nlmsg_data(nlh);
pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]);
if (pfx == NULL)
return -EINVAL;
return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
}
/*
 * inet6_addr_modify - apply new flags/lifetimes to an existing address
 * (RTM_NEWADDR with NLM_F_REPLACE).
 *
 * @prefered_lft must not exceed @valid_lft and @valid_lft must be
 * non-zero.  An infinite valid lifetime makes the address permanent;
 * a zero preferred timeout deprecates it immediately.  Re-adds the
 * prefix route with the matching expiry and reschedules verification.
 */
static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
u32 prefered_lft, u32 valid_lft)
{
u32 flags;
clock_t expires;
unsigned long timeout;
if (!valid_lft || (prefered_lft > valid_lft))
return -EINVAL;
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
expires = jiffies_to_clock_t(timeout * HZ);
valid_lft = timeout;
flags = RTF_EXPIRES;
} else {
expires = 0;
flags = 0;
ifa_flags |= IFA_F_PERMANENT;
}
timeout = addrconf_timeout_fixup(prefered_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
if (timeout == 0)
ifa_flags |= IFA_F_DEPRECATED;
prefered_lft = timeout;
}
spin_lock_bh(&ifp->lock);
/* replace only the user-settable flags, keep e.g. tentative state */
ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
ifp->tstamp = jiffies;
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
spin_unlock_bh(&ifp->lock);
if (!(ifp->flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ifp);
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
expires, flags);
addrconf_verify(0);
return 0;
}
/*
 * RTM_NEWADDR handler: add a new address, or modify an existing one
 * when NLM_F_REPLACE is set (and NLM_F_EXCL is not).
 *
 * Lifetimes come from an optional IFA_CACHEINFO attribute and default
 * to infinite.  Only IFA_F_NODAD and IFA_F_HOMEADDRESS are honoured
 * from the request flags.
 */
static int
inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ifaddrmsg *ifm;
	struct nlattr *tb[IFA_MAX+1];
	struct in6_addr *pfx;
	struct inet6_ifaddr *ifa;
	struct net_device *dev;
	u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
	u8 ifa_flags;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
	if (err < 0)
		return err;

	ifm = nlmsg_data(nlh);
	pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]);
	if (pfx == NULL)
		return -EINVAL;

	/*
	 * Cleanup: the previous version had an else branch re-assigning
	 * INFINITY_LIFE_TIME to both lifetimes, which is redundant —
	 * they are already initialized to that value above.
	 */
	if (tb[IFA_CACHEINFO]) {
		struct ifa_cacheinfo *ci;
		ci = nla_data(tb[IFA_CACHEINFO]);
		valid_lft = ci->ifa_valid;
		preferred_lft = ci->ifa_prefered;
	}

	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (dev == NULL)
		return -ENODEV;

	/* We ignore other flags so far. */
	ifa_flags = ifm->ifa_flags & (IFA_F_NODAD | IFA_F_HOMEADDRESS);

	ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
	if (ifa == NULL) {
		/*
		 * It would be best to check for !NLM_F_CREATE here but
		 * userspace alreay relies on not having to provide this.
		 */
		return inet6_addr_add(net, ifm->ifa_index, pfx,
				      ifm->ifa_prefixlen, ifa_flags,
				      preferred_lft, valid_lft);
	}

	if (nlh->nlmsg_flags & NLM_F_EXCL ||
	    !(nlh->nlmsg_flags & NLM_F_REPLACE))
		err = -EEXIST;
	else
		err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);

	in6_ifa_put(ifa);

	return err;
}
/* Fill in the fixed ifaddrmsg header of an RTM_*ADDR message. */
static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u8 flags,
			  u8 scope, int ifindex)
{
	struct ifaddrmsg *ifm = nlmsg_data(nlh);

	ifm->ifa_family = AF_INET6;
	ifm->ifa_prefixlen = prefixlen;
	ifm->ifa_flags = flags;
	ifm->ifa_scope = scope;
	ifm->ifa_index = ifindex;
}
/* Append an IFA_CACHEINFO attribute carrying timestamps and lifetimes. */
static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
			 unsigned long tstamp, u32 preferred, u32 valid)
{
	struct ifa_cacheinfo ci = {
		.cstamp = cstamp_delta(cstamp),
		.tstamp = cstamp_delta(tstamp),
		.ifa_prefered = preferred,
		.ifa_valid = valid,
	};

	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
/* Map IFA_* address-scope flags onto routing scope values. */
static inline int rt_scope(int ifa_scope)
{
	if (ifa_scope & IFA_HOST)
		return RT_SCOPE_HOST;
	if (ifa_scope & IFA_LINK)
		return RT_SCOPE_LINK;
	if (ifa_scope & IFA_SITE)
		return RT_SCOPE_SITE;
	return RT_SCOPE_UNIVERSE;
}
/* Worst-case netlink message size for one unicast address. */
static inline int inet6_ifaddr_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
	       + nla_total_size(16)	/* IFA_ADDRESS */
	       + nla_total_size(sizeof(struct ifa_cacheinfo));
}
/*
 * inet6_fill_ifaddr - build one RTM_*ADDR message for a unicast address.
 *
 * Lifetimes are reported as time remaining (age since tstamp subtracted),
 * clamped at zero; permanent addresses report infinity.  Returns the
 * nlmsg_end() result or -EMSGSIZE when @skb has no room.
 */
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
u32 pid, u32 seq, int event, unsigned int flags)
{
struct nlmsghdr *nlh;
u32 preferred, valid;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
if (!(ifa->flags&IFA_F_PERMANENT)) {
preferred = ifa->prefered_lft;
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
long tval = (jiffies - ifa->tstamp)/HZ;
if (preferred > tval)
preferred -= tval;
else
preferred = 0;
if (valid != INFINITY_LIFE_TIME) {
if (valid > tval)
valid -= tval;
else
valid = 0;
}
}
} else {
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 ||
put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
return nlmsg_end(skb, nlh);
}
/*
 * inet6_fill_ifmcaddr - build one RTM_GETMULTICAST reply for a
 * multicast address.  Multicast entries are reported as /128,
 * permanent, with infinite lifetimes.
 */
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
u32 pid, u32 seq, int event, u16 flags)
{
struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
int ifindex = ifmca->idev->dev->ifindex;
if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 ||
put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
return nlmsg_end(skb, nlh);
}
/*
 * inet6_fill_ifacaddr - same as above for an anycast address
 * (RTM_GETANYCAST, IFA_ANYCAST attribute).
 */
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
u32 pid, u32 seq, int event, unsigned int flags)
{
struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
int ifindex = ifaca->aca_idev->dev->ifindex;
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 ||
put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
return nlmsg_end(skb, nlh);
}
/* Which class of addresses in6_dump_addrs() should walk. */
enum addr_type_t {
UNICAST_ADDR,
MULTICAST_ADDR,
ANYCAST_ADDR,
};
/*
 * in6_dump_addrs - dump one device's addresses of the given @type into
 * @skb, resuming at index @s_ip_idx and reporting the new cursor via
 * @p_ip_idx.  Returns <= 0 when the skb filled up (dump to be resumed).
 * Called with rcu_read_lock() held.
 */
static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
struct netlink_callback *cb, enum addr_type_t type,
int s_ip_idx, int *p_ip_idx)
{
struct ifmcaddr6 *ifmca;
struct ifacaddr6 *ifaca;
int err = 1;
int ip_idx = *p_ip_idx;
read_lock_bh(&idev->lock);
switch (type) {
case UNICAST_ADDR: {
struct inet6_ifaddr *ifa;
/* unicast address incl. temp addr */
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (++ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWADDR,
NLM_F_MULTI);
if (err <= 0)
break;
}
break;
}
case MULTICAST_ADDR:
/* multicast address */
for (ifmca = idev->mc_list; ifmca;
ifmca = ifmca->next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifmcaddr(skb, ifmca,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_GETMULTICAST,
NLM_F_MULTI);
if (err <= 0)
break;
}
break;
case ANYCAST_ADDR:
/* anycast address */
for (ifaca = idev->ac_list; ifaca;
ifaca = ifaca->aca_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifacaddr(skb, ifaca,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_GETANYCAST,
NLM_F_MULTI);
if (err <= 0)
break;
}
break;
default:
break;
}
read_unlock_bh(&idev->lock);
*p_ip_idx = ip_idx;
return err;
}
/*
 * inet6_dump_addr - netlink dump driver shared by the unicast /
 * multicast / anycast dump handlers.  The resume cursor lives in
 * cb->args: [0]=hash bucket, [1]=device index, [2]=address index.
 */
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx, ip_idx;
int s_idx, s_ip_idx;
struct net_device *dev;
struct inet6_dev *idev;
struct hlist_head *head;
struct hlist_node *node;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
s_ip_idx = ip_idx = cb->args[2];
rcu_read_lock();
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
if (idx < s_idx)
goto cont;
/* past the resume point: restart address index from 0 */
if (h > s_h || idx > s_idx)
s_ip_idx = 0;
ip_idx = 0;
idev = __in6_dev_get(dev);
if (!idev)
goto cont;
if (in6_dump_addrs(idev, skb, cb, type,
s_ip_idx, &ip_idx) <= 0)
goto done;
cont:
idx++;
}
}
done:
rcu_read_unlock();
/* save the cursor so the next dump call can resume here */
cb->args[0] = h;
cb->args[1] = idx;
cb->args[2] = ip_idx;
return skb->len;
}
/* RTM_GETADDR dump: unicast addresses. */
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	return inet6_dump_addr(skb, cb, UNICAST_ADDR);
}
/* RTM_GETMULTICAST dump: multicast addresses. */
static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	return inet6_dump_addr(skb, cb, MULTICAST_ADDR);
}
/* RTM_GETANYCAST dump: anycast addresses. */
static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	return inet6_dump_addr(skb, cb, ANYCAST_ADDR);
}
/*
 * RTM_GETADDR (non-dump) handler: look up a single address, optionally
 * restricted to the device in ifa_index, and unicast one RTM_NEWADDR
 * reply to the requester.
 */
static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
void *arg)
{
struct net *net = sock_net(in_skb->sk);
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX+1];
struct in6_addr *addr = NULL;
struct net_device *dev = NULL;
struct inet6_ifaddr *ifa;
struct sk_buff *skb;
int err;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
if (err < 0)
goto errout;
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]);
if (addr == NULL) {
err = -EINVAL;
goto errout;
}
ifm = nlmsg_data(nlh);
if (ifm->ifa_index)
dev = __dev_get_by_index(net, ifm->ifa_index);
/* takes a reference on the returned ifa; released at errout_ifa */
ifa = ipv6_get_ifaddr(net, addr, dev, 1);
if (!ifa) {
err = -EADDRNOTAVAIL;
goto errout;
}
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout_ifa;
}
err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, RTM_NEWADDR, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout_ifa;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout_ifa:
in6_ifa_put(ifa);
errout:
return err;
}
/*
 * Broadcast an address event (@event) to RTNLGRP_IPV6_IFADDR listeners.
 * On allocation/fill failure the error is recorded on the group socket.
 */
static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
{
struct sk_buff *skb;
struct net *net = dev_net(ifa->idev->dev);
int err = -ENOBUFS;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
/*
 * ipv6_store_devconf - flatten @cnf into the DEVCONF_*-indexed s32
 * array used for the IFLA_INET6_CONF netlink attribute.  @bytes must
 * hold at least DEVCONF_MAX entries; slots not filled below stay zero
 * from the memset.  Jiffies-based values are exported in milliseconds.
 */
static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
__s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
memset(array, 0, bytes);
array[DEVCONF_FORWARDING] = cnf->forwarding;
array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
array[DEVCONF_MTU6] = cnf->mtu6;
array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
array[DEVCONF_AUTOCONF] = cnf->autoconf;
array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
jiffies_to_msecs(cnf->rtr_solicit_interval);
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
#ifdef CONFIG_IPV6_PRIVACY
array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
#endif
array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
#ifdef CONFIG_IPV6_ROUTER_PREF
array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
array[DEVCONF_RTR_PROBE_INTERVAL] =
jiffies_to_msecs(cnf->rtr_probe_interval);
#ifdef CONFIG_IPV6_ROUTE_INFO
array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
#endif
#endif
array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
#endif
array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
#ifdef CONFIG_LGE_DHCPV6_WIFI
array[DEVCONF_RA_INFO_FLAG] = cnf->ra_info_flag;
#endif
}
static inline size_t inet6_ifla6_size(void)
{
return nla_total_size(4) /* IFLA_INET6_FLAGS */
+ nla_total_size(sizeof(struct ifla_cacheinfo))
+ nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+ nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
+ nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */
}
static inline size_t inet6_if_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
}
/*
 * Copy a per-device atomic MIB into a u64 stats array: slot 0 holds the
 * item count, remaining slots the counters; trailing bytes are zeroed.
 */
static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
int items, int bytes)
{
int i;
int pad = bytes - sizeof(u64) * items;
BUG_ON(pad < 0);
/* Use put_unaligned() because stats may not be aligned for u64. */
put_unaligned(items, &stats[0]);
for (i = 1; i < items; i++)
put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
memset(&stats[items], 0, pad);
}
/*
 * Same layout as above but summing 64-bit per-cpu counters via
 * snmp_fold_field64() (@syncpoff locates the seqcount in each entry).
 */
static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
int items, int bytes, size_t syncpoff)
{
int i;
int pad = bytes - sizeof(u64) * items;
BUG_ON(pad < 0);
/* Use put_unaligned() because stats may not be aligned for u64. */
put_unaligned(items, &stats[0]);
for (i = 1; i < items; i++)
put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
memset(&stats[items], 0, pad);
}
/*
 * snmp6_fill_stats - fill @stats with the MIB selected by @attrtype
 * (IPv6 or ICMPv6 counters); unknown attribute types are ignored.
 */
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
int bytes)
{
switch (attrtype) {
case IFLA_INET6_STATS:
__snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
break;
case IFLA_INET6_ICMP6STATS:
__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
break;
}
}
/*
 * inet6_fill_ifla6_attrs - emit the IFLA_INET6_* attributes (flags,
 * cacheinfo, devconf, IP and ICMP6 stats) for @idev into @skb.
 * Note: the NLA_PUT* macros jump to nla_put_failure on overflow.
 * Returns 0 or -EMSGSIZE.
 */
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
{
struct nlattr *nla;
struct ifla_cacheinfo ci;
NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
ci.max_reasm_len = IPV6_MAXPLEN;
ci.tstamp = cstamp_delta(idev->tstamp);
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
if (nla == NULL)
goto nla_put_failure;
ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
/* XXX - MC not implemented */
nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
if (nla == NULL)
goto nla_put_failure;
snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
if (nla == NULL)
goto nla_put_failure;
snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
return 0;
nla_put_failure:
return -EMSGSIZE;
}
/* Size of the AF_INET6 link attributes for @dev, or 0 without inet6_dev. */
static size_t inet6_get_link_af_size(const struct net_device *dev)
{
	return __in6_dev_get(dev) ? inet6_ifla6_size() : 0;
}
/* rtnl_af_ops fill hook: emit the inet6 attribute block for @dev. */
static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
{
	struct inet6_dev *idev = __in6_dev_get(dev);

	if (!idev)
		return -ENODATA;
	return inet6_fill_ifla6_attrs(skb, idev) < 0 ? -EMSGSIZE : 0;
}
/*
 * inet6_fill_ifinfo - build an RTM_NEWLINK message for @idev: ifinfomsg
 * header, name/address/MTU/link attributes, then the inet6 protocol
 * info nested under IFLA_PROTINFO.  Returns nlmsg_end() or -EMSGSIZE.
 */
static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
u32 pid, u32 seq, int event, unsigned int flags)
{
struct net_device *dev = idev->dev;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
void *protoinfo;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
if (nlh == NULL)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ifi_family = AF_INET6;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
hdr->ifi_index = dev->ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
/* NLA_PUT* macros jump to nla_put_failure when the skb is full */
NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
if (dev->addr_len)
NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
if (dev->ifindex != dev->iflink)
NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
if (protoinfo == NULL)
goto nla_put_failure;
if (inet6_fill_ifla6_attrs(skb, idev) < 0)
goto nla_put_failure;
nla_nest_end(skb, protoinfo);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
/*
 * RTM_GETLINK (AF_INET6) dump: one RTM_NEWLINK per device that has an
 * inet6_dev.  Resume cursor in cb->args: [0]=hash bucket, [1]=dev index.
 */
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx = 0, s_idx;
struct net_device *dev;
struct inet6_dev *idev;
struct hlist_head *head;
struct hlist_node *node;
s_h = cb->args[0];
s_idx = cb->args[1];
rcu_read_lock();
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
if (idx < s_idx)
goto cont;
idev = __in6_dev_get(dev);
if (!idev)
goto cont;
if (inet6_fill_ifinfo(skb, idev,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWLINK, NLM_F_MULTI) <= 0)
goto out;
cont:
idx++;
}
}
out:
rcu_read_unlock();
/* save the cursor for the next dump invocation */
cb->args[1] = idx;
cb->args[0] = h;
return skb->len;
}
/*
 * Broadcast a link event for @idev to RTNLGRP_IPV6_IFINFO listeners.
 * On failure the error is recorded on the group socket.
 */
void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
{
struct sk_buff *skb;
struct net *net = dev_net(idev->dev);
int err = -ENOBUFS;
skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
}
/* Worst-case size of an RTM_NEWPREFIX message. */
static inline size_t inet6_prefix_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct prefixmsg))
+ nla_total_size(sizeof(struct in6_addr))
+ nla_total_size(sizeof(struct prefix_cacheinfo));
}
/*
 * inet6_fill_prefix - build an RTM_NEWPREFIX message from a received
 * router-advertisement prefix option.  Lifetimes are converted from
 * network byte order.  NLA_PUT jumps to nla_put_failure on overflow.
 */
static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
struct prefix_info *pinfo, u32 pid, u32 seq,
int event, unsigned int flags)
{
struct prefixmsg *pmsg;
struct nlmsghdr *nlh;
struct prefix_cacheinfo ci;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
pmsg = nlmsg_data(nlh);
pmsg->prefix_family = AF_INET6;
pmsg->prefix_pad1 = 0;
pmsg->prefix_pad2 = 0;
pmsg->prefix_ifindex = idev->dev->ifindex;
pmsg->prefix_len = pinfo->prefix_len;
pmsg->prefix_type = pinfo->type;
pmsg->prefix_pad3 = 0;
pmsg->prefix_flags = 0;
if (pinfo->onlink)
pmsg->prefix_flags |= IF_PREFIX_ONLINK;
if (pinfo->autoconf)
pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
ci.preferred_time = ntohl(pinfo->prefered);
ci.valid_time = ntohl(pinfo->valid);
NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
/*
 * Broadcast a prefix event to RTNLGRP_IPV6_PREFIX listeners.
 * On failure the error is recorded on the group socket.
 */
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
struct prefix_info *pinfo)
{
struct sk_buff *skb;
struct net *net = dev_net(idev->dev);
int err = -ENOBUFS;
skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
}
/*
 * __ipv6_ifa_notify - netlink-notify an address event and keep the
 * associated host route and anycast membership in sync.
 * event == 0 means "attributes changed" and is reported as RTM_NEWADDR.
 */
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
switch (event) {
case RTM_NEWADDR:
/*
 * If the address was optimistic
 * we inserted the route at the start of
 * our DAD process, so we don't need
 * to do it again
 */
if (!(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
break;
case RTM_DELADDR:
if (ifp->idev->cnf.forwarding)
addrconf_leave_anycast(ifp);
addrconf_leave_solict(ifp->idev, &ifp->addr);
/* hold the dst so a failed ip6_del_rt can free it explicitly */
dst_hold(&ifp->rt->dst);
if (ip6_del_rt(ifp->rt))
dst_free(&ifp->rt->dst);
break;
}
}
/* Wrapper: skip notification entirely once the device is going away. */
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
rcu_read_lock_bh();
if (likely(ifp->idev->dead == 0))
__ipv6_ifa_notify(event, ifp);
rcu_read_unlock_bh();
}
#ifdef CONFIG_SYSCTL
static
/*
 * sysctl handler for conf/<dev>/forwarding.
 *
 * ctl->data points at live kernel state that must only be changed under
 * the rtnl lock, so the proc helper is run against a stack copy and the
 * real update is applied afterwards by addrconf_fixup_forwarding().
 */
int addrconf_sysctl_forward(ctl_table *ctl, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	loff_t pos = *ppos;	/* saved so *ppos can be restored on error */
	ctl_table lctl;
	int ret;

	/*
	 * ctl->data points to idev->cnf.forwarding, we should
	 * not modify it until we get the rtnl lock.
	 */
	lctl = *ctl;
	lctl.data = &val;

	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);

	if (write)
		ret = addrconf_fixup_forwarding(ctl, valp, val);
	/* on failure, undo the file-position advance done by proc_dointvec() */
	if (ret)
		*ppos = pos;
	return ret;
}
/*
 * Replay a netdev event for @idev so the stack reacts to a change of the
 * disable_ipv6 setting: DOWN when IPv6 is now disabled, UP otherwise.
 */
static void dev_disable_change(struct inet6_dev *idev)
{
	unsigned long event;

	if (!idev || !idev->dev)
		return;

	event = idev->cnf.disable_ipv6 ? NETDEV_DOWN : NETDEV_UP;
	addrconf_notify(NULL, event, idev->dev);
}
/*
 * Apply a new disable_ipv6 setting to every device in @net, notifying
 * only those devices whose effective (boolean) state actually changed.
 */
static void addrconf_disable_change(struct net *net, __s32 newf)
{
	struct net_device *dev;
	struct inet6_dev *idev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		idev = __in6_dev_get(dev);
		if (idev) {
			/* compare as booleans: any non-zero values are equal */
			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
			idev->cnf.disable_ipv6 = newf;
			if (changed)
				dev_disable_change(idev);
		}
	}
	rcu_read_unlock();
}
/*
 * Commit a disable_ipv6 sysctl write under the rtnl lock.
 *
 * @table->extra1 carries the inet6_dev (per-device entry) and
 * @table->extra2 the struct net.  Writing the "all" entry also updates
 * the "default" value and fans out to every device; the "default" entry
 * only records the value for future devices.
 */
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
{
	struct net *net;
	int old;

	/* avoid deadlock against a concurrent rtnl holder re-entering sysctl */
	if (!rtnl_trylock())
		return restart_syscall();

	net = (struct net *)table->extra2;
	old = *p;
	*p = newf;

	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
		rtnl_unlock();
		return 0;
	}

	if (p == &net->ipv6.devconf_all->disable_ipv6) {
		net->ipv6.devconf_dflt->disable_ipv6 = newf;
		addrconf_disable_change(net, newf);
	} else if ((!newf) ^ (!old))	/* per-device entry: notify on boolean change */
		dev_disable_change((struct inet6_dev *)table->extra1);

	rtnl_unlock();
	return 0;
}
static
/*
 * sysctl handler for conf/<dev>/disable_ipv6.  Same shadow-copy pattern
 * as addrconf_sysctl_forward(): parse into a stack variable first, then
 * apply under rtnl via addrconf_disable_ipv6().
 */
int addrconf_sysctl_disable(ctl_table *ctl, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	loff_t pos = *ppos;	/* saved so *ppos can be restored on error */
	ctl_table lctl;
	int ret;

	/*
	 * ctl->data points to idev->cnf.disable_ipv6, we should
	 * not modify it until we get the rtnl lock.
	 */
	lctl = *ctl;
	lctl.data = &val;

	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);

	if (write)
		ret = addrconf_disable_ipv6(ctl, valp, val);
	/* on failure, undo the file-position advance done by proc_dointvec() */
	if (ret)
		*ppos = pos;
	return ret;
}
/*
 * Template sysctl table for net.ipv6.conf.<dev>.*.
 *
 * __addrconf_sysctl_register() kmemdup()s this template per device and
 * relocates each .data pointer from &ipv6_devconf into the device's own
 * struct ipv6_devconf.  Entries default to proc_dointvec; "forwarding"
 * and "disable_ipv6" use handlers that take the rtnl lock before
 * committing.  The table is terminated by an empty sentinel entry.
 */
static struct addrconf_sysctl_table
{
	struct ctl_table_header *sysctl_header;
	ctl_table addrconf_vars[DEVCONF_MAX+1];
	char *dev_name;	/* owned copy; .procname must stay valid after SIOCSIFNAME */
} addrconf_sysctl __read_mostly = {
	.sysctl_header = NULL,
	.addrconf_vars = {
		{
			.procname	= "forwarding",
			.data		= &ipv6_devconf.forwarding,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			/* custom handler: commits under rtnl */
			.proc_handler	= addrconf_sysctl_forward,
		},
		{
			.procname	= "hop_limit",
			.data		= &ipv6_devconf.hop_limit,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "mtu",
			.data		= &ipv6_devconf.mtu6,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "accept_ra",
			.data		= &ipv6_devconf.accept_ra,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "accept_redirects",
			.data		= &ipv6_devconf.accept_redirects,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "autoconf",
			.data		= &ipv6_devconf.autoconf,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "dad_transmits",
			.data		= &ipv6_devconf.dad_transmits,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "router_solicitations",
			.data		= &ipv6_devconf.rtr_solicits,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			/* value stored in jiffies; converted by the handler */
			.procname	= "router_solicitation_interval",
			.data		= &ipv6_devconf.rtr_solicit_interval,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		{
			.procname	= "router_solicitation_delay",
			.data		= &ipv6_devconf.rtr_solicit_delay,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		{
			.procname	= "force_mld_version",
			.data		= &ipv6_devconf.force_mld_version,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#ifdef CONFIG_IPV6_PRIVACY
		/* RFC 3041/4941 privacy-extension knobs */
		{
			.procname	= "use_tempaddr",
			.data		= &ipv6_devconf.use_tempaddr,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "temp_valid_lft",
			.data		= &ipv6_devconf.temp_valid_lft,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "temp_prefered_lft",
			.data		= &ipv6_devconf.temp_prefered_lft,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "regen_max_retry",
			.data		= &ipv6_devconf.regen_max_retry,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "max_desync_factor",
			.data		= &ipv6_devconf.max_desync_factor,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#endif
		{
			.procname	= "max_addresses",
			.data		= &ipv6_devconf.max_addresses,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "accept_ra_defrtr",
			.data		= &ipv6_devconf.accept_ra_defrtr,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "accept_ra_pinfo",
			.data		= &ipv6_devconf.accept_ra_pinfo,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#ifdef CONFIG_IPV6_ROUTER_PREF
		{
			.procname	= "accept_ra_rtr_pref",
			.data		= &ipv6_devconf.accept_ra_rtr_pref,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "router_probe_interval",
			.data		= &ipv6_devconf.rtr_probe_interval,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
#ifdef CONFIG_IPV6_ROUTE_INFO
		{
			.procname	= "accept_ra_rt_info_max_plen",
			.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#endif
#endif
		{
			.procname	= "accept_ra_rt_table",
			.data		= &ipv6_devconf.accept_ra_rt_table,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "proxy_ndp",
			.data		= &ipv6_devconf.proxy_ndp,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "accept_source_route",
			.data		= &ipv6_devconf.accept_source_route,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
		{
			.procname	= "optimistic_dad",
			.data		= &ipv6_devconf.optimistic_dad,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "use_optimistic",
			.data		= &ipv6_devconf.use_optimistic,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#endif
#ifdef CONFIG_IPV6_MROUTE
		{
			/* read-only: multicast forwarding state is kernel-managed */
			.procname	= "mc_forwarding",
			.data		= &ipv6_devconf.mc_forwarding,
			.maxlen		= sizeof(int),
			.mode		= 0444,
			.proc_handler	= proc_dointvec,
		},
#endif
		{
			.procname	= "disable_ipv6",
			.data		= &ipv6_devconf.disable_ipv6,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			/* custom handler: commits under rtnl */
			.proc_handler	= addrconf_sysctl_disable,
		},
		{
			.procname	= "accept_dad",
			.data		= &ipv6_devconf.accept_dad,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{
			.procname	= "force_tllao",
			.data		= &ipv6_devconf.force_tllao,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec
		},
		{
			.procname	= "accept_ra_prefix_route",
			.data		= &ipv6_devconf.accept_ra_prefix_route,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
#ifdef CONFIG_LGE_DHCPV6_WIFI
		{
			.procname	= "ra_info_flag",
			.data		= &ipv6_devconf.ra_info_flag,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec
		},
#endif
		{
			/* sentinel */
		}
	},
};
/*
 * Register the per-device sysctl tree net.ipv6.conf.<dev_name>/ backed by
 * the ipv6_devconf instance @p (idev may be NULL for "all"/"default").
 *
 * Duplicates the addrconf_sysctl template, relocates every .data pointer
 * from the global ipv6_devconf into @p, and registers the table.
 * Returns 0 on success, -ENOBUFS on any allocation/registration failure,
 * unwinding via the goto cleanup chain.
 */
static int __addrconf_sysctl_register(struct net *net, char *dev_name,
		struct inet6_dev *idev, struct ipv6_devconf *p)
{
	int i;
	struct addrconf_sysctl_table *t;

#define ADDRCONF_CTL_PATH_DEV	3

	struct ctl_path addrconf_ctl_path[] = {
		{ .procname = "net", },
		{ .procname = "ipv6", },
		{ .procname = "conf", },
		{ /* to be set */ },
		{ },
	};

	t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		goto out;

	for (i = 0; t->addrconf_vars[i].data; i++) {
		/* rebase the template's field pointer onto this instance */
		t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
		t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
		t->addrconf_vars[i].extra2 = net;
	}

	/*
	 * Make a copy of dev_name, because '.procname' is regarded as const
	 * by sysctl and we wouldn't want anyone to change it under our feet
	 * (see SIOCSIFNAME).
	 */
	t->dev_name = kstrdup(dev_name, GFP_KERNEL);
	if (!t->dev_name)
		goto free;

	addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name;

	t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path,
			t->addrconf_vars);
	if (t->sysctl_header == NULL)
		goto free_procname;

	p->sysctl = t;
	return 0;

free_procname:
	kfree(t->dev_name);
free:
	kfree(t);
out:
	return -ENOBUFS;
}
/*
 * Tear down the sysctl tree registered for @p by
 * __addrconf_sysctl_register().  Clears p->sysctl first so the devconf
 * no longer references the table being destroyed.  No-op when nothing
 * was registered.
 */
static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
{
	struct addrconf_sysctl_table *table = p->sysctl;

	if (!table)
		return;

	p->sysctl = NULL;
	unregister_net_sysctl_table(table->sysctl_header);
	kfree(table->dev_name);
	kfree(table);
}
/*
 * Register both sysctl trees for a device: the neighbour-discovery tree
 * and the net.ipv6.conf.<dev> tree.  Registration failure of the latter
 * is ignored here (best effort).
 */
static void addrconf_sysctl_register(struct inet6_dev *idev)
{
	neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
			      &ndisc_ifinfo_sysctl_change);
	__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
				   idev, &idev->cnf);
}
/* Inverse of addrconf_sysctl_register(), in reverse order. */
static void addrconf_sysctl_unregister(struct inet6_dev *idev)
{
	__addrconf_sysctl_unregister(&idev->cnf);
	neigh_sysctl_unregister(idev->nd_parms);
}
#endif
/*
 * Per-namespace init: give each netns its own copies of the "all" and
 * "default" devconf and (with sysctl) register their sysctl trees.
 * Returns 0 or a negative errno, unwinding partial setup on failure.
 */
static int __net_init addrconf_init_net(struct net *net)
{
	int err = -ENOMEM;
	struct ipv6_devconf *all, *dflt;

	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
	if (all == NULL)
		goto err_alloc_all;

	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
	if (dflt == NULL)
		goto err_alloc_dflt;

	/* these will be inherited by all namespaces */
	dflt->autoconf = ipv6_defaults.autoconf;
	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;

	net->ipv6.devconf_all = all;
	net->ipv6.devconf_dflt = dflt;

#ifdef CONFIG_SYSCTL
	err = __addrconf_sysctl_register(net, "all", NULL, all);
	if (err < 0)
		goto err_reg_all;

	err = __addrconf_sysctl_register(net, "default", NULL, dflt);
	if (err < 0)
		goto err_reg_dflt;
#endif
	return 0;

#ifdef CONFIG_SYSCTL
err_reg_dflt:
	__addrconf_sysctl_unregister(all);
err_reg_all:
	kfree(dflt);
#endif
err_alloc_dflt:
	kfree(all);
err_alloc_all:
	return err;
}
/*
 * Per-namespace teardown.  The init_net devconf structures are not
 * freed here; only non-initial namespaces own heap copies to release.
 */
static void __net_exit addrconf_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
	__addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
	__addrconf_sysctl_unregister(net->ipv6.devconf_all);
#endif
	if (!net_eq(net, &init_net)) {
		kfree(net->ipv6.devconf_dflt);
		kfree(net->ipv6.devconf_all);
	}
}
/* Per-network-namespace lifecycle hooks for addrconf. */
static struct pernet_operations addrconf_ops = {
	.init = addrconf_init_net,
	.exit = addrconf_exit_net,
};
/*
 * Device notifier
 */

/* Subscribe @nb to IPv6 address add/delete events. */
int register_inet6addr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&inet6addr_chain, nb);
}
EXPORT_SYMBOL(register_inet6addr_notifier);

/* Unsubscribe @nb from IPv6 address add/delete events. */
int unregister_inet6addr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
}
EXPORT_SYMBOL(unregister_inet6addr_notifier);
/* rtnetlink address-family hooks for AF_INET6 link attributes. */
static struct rtnl_af_ops inet6_ops = {
	.family		  = AF_INET6,
	.fill_link_af	  = inet6_fill_link_af,
	.get_link_af_size = inet6_get_link_af_size,
};
/*
 *	Init / cleanup code
 */

/*
 * Module init for IPv6 address configuration: address-label table,
 * pernet ops, the netdev notifier, the periodic verify timer and all
 * RTM_* rtnetlink handlers.  Returns 0 or a negative errno with all
 * earlier steps unwound.
 */
int __init addrconf_init(void)
{
	int i, err;

	err = ipv6_addr_label_init();
	if (err < 0) {
		printk(KERN_CRIT "IPv6 Addrconf:"
		       " cannot initialize default policy table: %d.\n", err);
		goto out;
	}

	err = register_pernet_subsys(&addrconf_ops);
	if (err < 0)
		goto out_addrlabel;

	/* The addrconf netdev notifier requires that loopback_dev
	 * has it's ipv6 private information allocated and setup
	 * before it can bring up and give link-local addresses
	 * to other devices which are up.
	 *
	 * Unfortunately, loopback_dev is not necessarily the first
	 * entry in the global dev_base list of net devices.  In fact,
	 * it is likely to be the very last entry on that list.
	 * So this causes the notifier registry below to try and
	 * give link-local addresses to all devices besides loopback_dev
	 * first, then loopback_dev, which cases all the non-loopback_dev
	 * devices to fail to get a link-local address.
	 *
	 * So, as a temporary fix, allocate the ipv6 structure for
	 * loopback_dev first by hand.
	 * Longer term, all of the dependencies ipv6 has upon the loopback
	 * device and it being up should be removed.
	 */
	rtnl_lock();
	if (!ipv6_add_dev(init_net.loopback_dev))
		err = -ENOMEM;
	rtnl_unlock();
	if (err)
		goto errlo;

	for (i = 0; i < IN6_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&inet6_addr_lst[i]);

	register_netdevice_notifier(&ipv6_dev_notf);

	/* kick off the periodic address-lifetime verification */
	addrconf_verify(0);

	err = rtnl_af_register(&inet6_ops);
	if (err < 0)
		goto errout_af;

	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
			      NULL);
	if (err < 0)
		goto errout;

	/* Only the first call to __rtnl_register can fail */
	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL);
	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL);
	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
			inet6_dump_ifaddr, NULL);
	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
			inet6_dump_ifmcaddr, NULL);
	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
			inet6_dump_ifacaddr, NULL);

	ipv6_addr_label_rtnl_register();

	return 0;
errout:
	rtnl_af_unregister(&inet6_ops);
errout_af:
	unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
	unregister_pernet_subsys(&addrconf_ops);
out_addrlabel:
	ipv6_addr_label_cleanup();
out:
	return err;
}
/*
 * Module exit: unregister notifiers/pernet ops, tear down every device's
 * IPv6 state (loopback last), verify the address hash is empty and stop
 * the verification timer.
 */
void addrconf_cleanup(void)
{
	struct net_device *dev;
	int i;

	unregister_netdevice_notifier(&ipv6_dev_notf);
	unregister_pernet_subsys(&addrconf_ops);
	ipv6_addr_label_cleanup();

	rtnl_lock();

	__rtnl_af_unregister(&inet6_ops);

	/* clean dev list */
	for_each_netdev(&init_net, dev) {
		if (__in6_dev_get(dev) == NULL)
			continue;
		addrconf_ifdown(dev, 1);
	}
	/* loopback last, with full teardown (how == 2) */
	addrconf_ifdown(init_net.loopback_dev, 2);

	/*
	 *	Check hash table.
	 */
	spin_lock_bh(&addrconf_hash_lock);
	for (i = 0; i < IN6_ADDR_HSIZE; i++)
		WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
	spin_unlock_bh(&addrconf_hash_lock);

	del_timer(&addr_chk_timer);

	rtnl_unlock();
}
| lawnn/Dorimanx-LG-G2-D802-Kernel | net/ipv6/addrconf.c | C | gpl-2.0 | 130,583 |
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* #define DEBUG */
#define DEV_DBG_PREFIX "HDMI: "
/* #define REG_DUMP */

/* verbose dumping of CEC frames in hdmi_msm_dump_cec_msg() */
#define CEC_MSG_PRINT
/* reset the CEC hardware state machine after send/recv (latch-up workaround) */
#define TOGGLE_CEC_HARDWARE_FSM

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <mach/msm_hdmi_audio.h>
#include <mach/clk.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>

#include "msm_fb.h"
#include "hdmi_msm.h"

/* Supported HDMI Audio channels */
#define MSM_HDMI_AUDIO_CHANNEL_2	0
#define MSM_HDMI_AUDIO_CHANNEL_4	1
#define MSM_HDMI_AUDIO_CHANNEL_6	2
#define MSM_HDMI_AUDIO_CHANNEL_8	3
#define MSM_HDMI_AUDIO_CHANNEL_MAX	4
#define MSM_HDMI_AUDIO_CHANNEL_FORCE_32BIT	0x7FFFFFFF

/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ	0
#define MSM_HDMI_SAMPLE_RATE_44_1KHZ	1
#define MSM_HDMI_SAMPLE_RATE_48KHZ	2
#define MSM_HDMI_SAMPLE_RATE_88_2KHZ	3
#define MSM_HDMI_SAMPLE_RATE_96KHZ	4
#define MSM_HDMI_SAMPLE_RATE_176_4KHZ	5
#define MSM_HDMI_SAMPLE_RATE_192KHZ	6
#define MSM_HDMI_SAMPLE_RATE_MAX	7
#define MSM_HDMI_SAMPLE_RATE_FORCE_32BIT	0x7FFFFFFF

/* currently selected audio sample rate (index above) */
static int msm_hdmi_sample_rate = MSM_HDMI_SAMPLE_RATE_48KHZ;

/* HDMI/HDCP Registers */
#define HDCP_DDC_STATUS		0x0128
#define HDCP_DDC_CTRL_0		0x0120
#define HDCP_DDC_CTRL_1		0x0124
#define HDMI_DDC_CTRL		0x020C

#define HPD_EVENT_OFFLINE 0
#define HPD_EVENT_ONLINE  1

/*
 * Update the HDMI-audio switch device state (skipped in DVI mode),
 * either on change or unconditionally when @force is set.
 */
#define SWITCH_SET_HDMI_AUDIO(d, force) \
	do {\
		if (!hdmi_msm_is_dvi_mode() &&\
			((force) ||\
			 (external_common_state->audio_sdev.state != (d)))) {\
			switch_set_state(&external_common_state->audio_sdev,\
					(d));\
			DEV_INFO("%s: hdmi_audio state switched to %d\n",\
				__func__,\
				external_common_state->audio_sdev.state);\
		} \
	} while (0)
struct workqueue_struct *hdmi_work_queue;
struct hdmi_msm_state_type *hdmi_msm_state;

/* Enable HDCP by default */
static bool hdcp_feature_on = true;

DEFINE_MUTEX(hdmi_msm_state_mutex);
EXPORT_SYMBOL(hdmi_msm_state_mutex);
static DEFINE_MUTEX(hdcp_auth_state_mutex);

/* forward declarations for handlers defined later in this file */
static void hdmi_msm_dump_regs(const char *prefix);

static void hdmi_msm_hdcp_enable(void);
static void hdmi_msm_turn_on(void);
static int hdmi_msm_audio_off(void);
static int hdmi_msm_read_edid(void);
static void hdmi_msm_hpd_off(void);
static boolean hdmi_msm_is_dvi_mode(void);

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT

static void hdmi_msm_cec_line_latch_detect(void);

#ifdef TOGGLE_CEC_HARDWARE_FSM
/* send/recv completion flags used to decide when to reset the CEC FSM */
static boolean msg_send_complete = TRUE;
static boolean msg_recv_complete = TRUE;
#endif

/* CEC register field encodings (see hdmi_msm_cec_init() for usage) */
#define HDMI_MSM_CEC_REFTIMER_REFTIMER_ENABLE	BIT(16)
#define HDMI_MSM_CEC_REFTIMER_REFTIMER(___t)	(((___t)&0xFFFF) << 0)

#define HDMI_MSM_CEC_TIME_SIGNAL_FREE_TIME(___t)	(((___t)&0x1FF) << 7)
#define HDMI_MSM_CEC_TIME_ENABLE			BIT(0)

#define HDMI_MSM_CEC_ADDR_LOGICAL_ADDR(___la)	(((___la)&0xFF) << 0)

#define HDMI_MSM_CEC_CTRL_LINE_OE			BIT(9)
#define HDMI_MSM_CEC_CTRL_FRAME_SIZE(___sz)		(((___sz)&0x1F) << 4)
#define HDMI_MSM_CEC_CTRL_SOFT_RESET		BIT(2)
#define HDMI_MSM_CEC_CTRL_SEND_TRIG			BIT(1)
#define HDMI_MSM_CEC_CTRL_ENABLE			BIT(0)

#define HDMI_MSM_CEC_INT_FRAME_RD_DONE_MASK		BIT(7)
#define HDMI_MSM_CEC_INT_FRAME_RD_DONE_ACK		BIT(6)
#define HDMI_MSM_CEC_INT_FRAME_RD_DONE_INT		BIT(6)
#define HDMI_MSM_CEC_INT_MONITOR_MASK		BIT(5)
#define HDMI_MSM_CEC_INT_MONITOR_ACK		BIT(4)
#define HDMI_MSM_CEC_INT_MONITOR_INT		BIT(4)
#define HDMI_MSM_CEC_INT_FRAME_ERROR_MASK		BIT(3)
#define HDMI_MSM_CEC_INT_FRAME_ERROR_ACK		BIT(2)
#define HDMI_MSM_CEC_INT_FRAME_ERROR_INT		BIT(2)
#define HDMI_MSM_CEC_INT_FRAME_WR_DONE_MASK		BIT(1)
#define HDMI_MSM_CEC_INT_FRAME_WR_DONE_ACK		BIT(0)
#define HDMI_MSM_CEC_INT_FRAME_WR_DONE_INT		BIT(0)

/* "write succeeded": WR_DONE raised with WR_DONE and ERROR interrupts masked */
#define HDMI_MSM_CEC_FRAME_WR_SUCCESS(___st)         (((___st)&0xB) ==\
		(HDMI_MSM_CEC_INT_FRAME_WR_DONE_INT |\
			HDMI_MSM_CEC_INT_FRAME_WR_DONE_MASK |\
			HDMI_MSM_CEC_INT_FRAME_ERROR_MASK))

#define HDMI_MSM_CEC_RETRANSMIT_NUM(___num)		(((___num)&0xF) << 4)
#define HDMI_MSM_CEC_RETRANSMIT_ENABLE		BIT(0)

#define HDMI_MSM_CEC_WR_DATA_DATA(___d)		(((___d)&0xFF) << 8)
/*
 * Program the CEC block: reference timer, default logical address (4),
 * interrupt masks, signal-timing tweaks, signal-free-time counter, and
 * finally enable the engine.
 */
void hdmi_msm_cec_init(void)
{
	/* 0x02A8 CEC_REFTIMER */
	HDMI_OUTP(0x02A8,
		HDMI_MSM_CEC_REFTIMER_REFTIMER_ENABLE
		| HDMI_MSM_CEC_REFTIMER_REFTIMER(27 * 50)
		);

	/*
	 * 0x02A0 CEC_ADDR
	 * Starting with a default address of 4
	 */
	HDMI_OUTP(0x02A0, HDMI_MSM_CEC_ADDR_LOGICAL_ADDR(4));

	hdmi_msm_state->first_monitor = 0;
	hdmi_msm_state->fsm_reset_done = false;

	/* 0x029C CEC_INT */
	/* Enable CEC interrupts */
	HDMI_OUTP(0x029C,					\
		  HDMI_MSM_CEC_INT_FRAME_WR_DONE_MASK		\
		  | HDMI_MSM_CEC_INT_FRAME_ERROR_MASK		\
		  | HDMI_MSM_CEC_INT_MONITOR_MASK		\
		  | HDMI_MSM_CEC_INT_FRAME_RD_DONE_MASK);

	HDMI_OUTP(0x02B0, 0x7FF << 4 | 1);

	/*
	 * Slight adjustment to logic 1 low periods on read,
	 * CEC Test 8.2-3 was failing, 8 for the
	 * BIT_1_ERR_RANGE_HI = 8 => 750us, the test used 775us,
	 * so increased this to 9 which => 800us.
	 */
	/*
	 * CEC latch up issue - To fire monitor interrupt
	 * for every start of message
	 */
	HDMI_OUTP(0x02E0, 0x880000);

	/*
	 * Slight adjustment to logic 0 low period on write
	 */
	HDMI_OUTP(0x02DC, 0x8888A888);

	/*
	 * Enable Signal Free Time counter and set to 7 bit periods
	 */
	HDMI_OUTP(0x02A4, 0x1 | (7 * 0x30) << 7);

	/* 0x028C CEC_CTRL */
	HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
}
/*
 * Program the CEC logical address register (CEC_ADDR at 0x02A0,
 * LOGICAL_ADDR field in bits 7:0).
 */
void hdmi_msm_cec_write_logical_addr(int addr)
{
	/* same encoding as hdmi_msm_cec_init(): field macro masks to 8 bits */
	HDMI_OUTP(0x02A0, HDMI_MSM_CEC_ADDR_LOGICAL_ADDR(addr));
}
/*
 * Debug-dump a CEC message (no-op unless CEC_MSG_PRINT is defined).
 * frame_size counts header+opcode+operands; < 2 means a polling message
 * with no opcode/operands.
 */
void hdmi_msm_dump_cec_msg(struct hdmi_msm_cec_msg *msg)
{
#ifdef CEC_MSG_PRINT
	int i;
	DEV_DBG("sender_id     : %d", msg->sender_id);
	DEV_DBG("recvr_id     : %d", msg->recvr_id);
	if (msg->frame_size < 2) {
		DEV_DBG("polling message");
		return;
	}
	DEV_DBG("opcode      : %02x", msg->opcode);
	for (i = 0; i < msg->frame_size - 2; i++)
		DEV_DBG("operand(%2d) : %02x", i + 1, msg->operand[i]);
#endif /* CEC_MSG_PRINT */
}
/*
 * Transmit a CEC frame: program retransmit policy, load the header,
 * opcode and operand blocks into the write FIFO, wait for the line to go
 * idle, trigger the send and block (up to 1s) for the write-done
 * interrupt.  Optionally resets the CEC hardware FSM afterwards as a
 * latch-up workaround.
 *
 * Fix: in the DRVR_ONLY_CECT_NO_DAEMON branch the retransmit register
 * value was computed as
 *     NUM(x) | (x > 0) ? ENABLE : 0
 * which, because '|' binds tighter than '?:', parses as
 *     (NUM(x) | (x > 0)) ? ENABLE : 0
 * -- the retransmit count was never written and ENABLE was set whenever
 * the expression was non-zero.  Parenthesize the conditional so the
 * intended value NUM(x) | (x > 0 ? ENABLE : 0) is programmed.
 */
void hdmi_msm_cec_msg_send(struct hdmi_msm_cec_msg *msg)
{
	int i;
	uint32 timeout_count = 1;
	int retry = 10;

	/* broadcast frames (destination 15) set the EOM/broadcast bit */
	boolean frameType = (msg->recvr_id == 15 ? BIT(0) : 0);

	mutex_lock(&hdmi_msm_state_mutex);
	hdmi_msm_state->fsm_reset_done = false;
	mutex_unlock(&hdmi_msm_state_mutex);
#ifdef TOGGLE_CEC_HARDWARE_FSM
	msg_send_complete = FALSE;
#endif

	INIT_COMPLETION(hdmi_msm_state->cec_frame_wr_done);
	hdmi_msm_state->cec_frame_wr_status = 0;

	/* 0x0294 HDMI_MSM_CEC_RETRANSMIT */
	HDMI_OUTP(0x0294,
#ifdef DRVR_ONLY_CECT_NO_DAEMON
		HDMI_MSM_CEC_RETRANSMIT_NUM(msg->retransmit)
		| ((msg->retransmit > 0) ? HDMI_MSM_CEC_RETRANSMIT_ENABLE : 0));
#else
		HDMI_MSM_CEC_RETRANSMIT_NUM(0) |
			HDMI_MSM_CEC_RETRANSMIT_ENABLE);
#endif

	/* 0x028C CEC_CTRL */
	HDMI_OUTP(0x028C, 0x1 | msg->frame_size << 4);

	/* 0x0290 CEC_WR_DATA */

	/* header block */
	HDMI_OUTP(0x0290,
		HDMI_MSM_CEC_WR_DATA_DATA(msg->sender_id << 4 | msg->recvr_id)
		| frameType);

	/* data block 0 : opcode */
	HDMI_OUTP(0x0290,
		HDMI_MSM_CEC_WR_DATA_DATA(msg->frame_size < 2 ? 0 : msg->opcode)
		| frameType);

	/* data block 1-14 : operand 0-13 */
	/* NOTE(review): writes frame_size-1 operand slots although the frame
	 * carries frame_size-2 operands; the CTRL frame size above should
	 * make the extra slot inert -- confirm against the CEC block spec. */
	for (i = 0; i < msg->frame_size - 1; i++)
		HDMI_OUTP(0x0290,
			HDMI_MSM_CEC_WR_DATA_DATA(msg->operand[i])
			| frameType);

	/* zero-fill the remaining FIFO slots */
	for (; i < 14; i++)
		HDMI_OUTP(0x0290,
			HDMI_MSM_CEC_WR_DATA_DATA(0)
			| frameType);

	/* wait (bounded) for the CEC line to go idle before triggering */
	while ((HDMI_INP(0x0298) & 1) && retry--) {
		DEV_DBG("CEC line is busy(%d)\n", retry);
		schedule();
	}

	/* 0x028C CEC_CTRL */
	HDMI_OUTP(0x028C,
		HDMI_MSM_CEC_CTRL_LINE_OE
		| HDMI_MSM_CEC_CTRL_FRAME_SIZE(msg->frame_size)
		| HDMI_MSM_CEC_CTRL_SEND_TRIG
		| HDMI_MSM_CEC_CTRL_ENABLE);

	timeout_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->cec_frame_wr_done, HZ);

	if (!timeout_count) {
		hdmi_msm_state->cec_frame_wr_status |= CEC_STATUS_WR_TMOUT;
		DEV_ERR("%s: timedout", __func__);
		hdmi_msm_dump_cec_msg(msg);
	} else {
		DEV_DBG("CEC write frame done (frame len=%d)",
			msg->frame_size);
		hdmi_msm_dump_cec_msg(msg);
	}

#ifdef TOGGLE_CEC_HARDWARE_FSM
	if (!msg_recv_complete) {
		/* Toggle CEC hardware FSM */
		HDMI_OUTP(0x028C, 0x0);
		HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
		msg_recv_complete = TRUE;
	}
	msg_send_complete = TRUE;
#else
	HDMI_OUTP(0x028C, 0x0);
	HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
#endif
}
/*
 * CEC latch-up recovery (CECT 9-5-1): if a monitor interrupt fired but no
 * RD_DONE/WR_DONE/ERROR followed, the line is presumed stuck; toggle the
 * CEC hardware FSM off/on to recover.  Runs under hdmi_msm_state_mutex.
 */
void hdmi_msm_cec_line_latch_detect(void)
{
	/*
	 * CECT 9-5-1
	 * The timer period needs to be changed to appropriate value
	 */
	/*
	 * Timedout without RD_DONE, WR_DONE or ERR_INT
	 * Toggle CEC hardware FSM
	 */
	mutex_lock(&hdmi_msm_state_mutex);
	if (hdmi_msm_state->first_monitor == 1) {
		DEV_WARN("CEC line is probably latched up - CECT 9-5-1");
		/* record the reset so an in-flight receive knows about it */
		if (!msg_recv_complete)
			hdmi_msm_state->fsm_reset_done = true;
		HDMI_OUTP(0x028C, 0x0);
		HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
		hdmi_msm_state->first_monitor = 0;
	}
	mutex_unlock(&hdmi_msm_state_mutex);
}
/*
 * Drain one received CEC frame from the read FIFO into the next slot of
 * the driver's ring buffer (cec_queue_wr), then advance the write
 * pointer.  With DRVR_ONLY_CECT_NO_DAEMON the driver also answers a set
 * of opcodes itself (compliance-test mode); otherwise a userspace daemon
 * consumes the queue.
 *
 * Fix: the "wp is NULL" early return executed while still holding
 * hdmi_msm_state_mutex (the unlock sat below the check), leaking the
 * mutex and deadlocking every later CEC operation.  Unlock before
 * returning.
 */
void hdmi_msm_cec_msg_recv(void)
{
	uint32 data;
	int i;
#ifdef DRVR_ONLY_CECT_NO_DAEMON
	struct hdmi_msm_cec_msg temp_msg;
#endif
	mutex_lock(&hdmi_msm_state_mutex);
	if (hdmi_msm_state->cec_queue_wr == hdmi_msm_state->cec_queue_rd
	    && hdmi_msm_state->cec_queue_full) {
		mutex_unlock(&hdmi_msm_state_mutex);
		DEV_ERR("CEC message queue is overflowing\n");
#ifdef DRVR_ONLY_CECT_NO_DAEMON
		/*
		 * Without CEC daemon:
		 * Compliance tests fail once the queue gets filled up.
		 * so reset the pointers to the start of the queue.
		 */
		hdmi_msm_state->cec_queue_wr = hdmi_msm_state->cec_queue_start;
		hdmi_msm_state->cec_queue_rd = hdmi_msm_state->cec_queue_start;
		hdmi_msm_state->cec_queue_full = false;
#else
		return;
#endif
	}
	if (hdmi_msm_state->cec_queue_wr == NULL) {
		/* drop the mutex before bailing out (was leaked here) */
		mutex_unlock(&hdmi_msm_state_mutex);
		DEV_ERR("%s: wp is NULL\n", __func__);
		return;
	}
	mutex_unlock(&hdmi_msm_state_mutex);

	/* 0x02AC CEC_RD_DATA: header block -> initiator/destination/length */
	data = HDMI_INP(0x02AC);
	hdmi_msm_state->cec_queue_wr->sender_id = (data & 0xF0) >> 4;
	hdmi_msm_state->cec_queue_wr->recvr_id = (data & 0x0F);
	hdmi_msm_state->cec_queue_wr->frame_size = (data & 0x1F00) >> 8;
	DEV_DBG("Recvd init=[%u] dest=[%u] size=[%u]\n",
		hdmi_msm_state->cec_queue_wr->sender_id,
		hdmi_msm_state->cec_queue_wr->recvr_id,
		hdmi_msm_state->cec_queue_wr->frame_size);

	if (hdmi_msm_state->cec_queue_wr->frame_size < 1) {
		DEV_ERR("%s: invalid message (frame length = %d)",
			__func__, hdmi_msm_state->cec_queue_wr->frame_size);
		return;
	} else if (hdmi_msm_state->cec_queue_wr->frame_size == 1) {
		/* header-only polling message: nothing to queue */
		DEV_DBG("%s: polling message (dest[%x] <- init[%x])",
			__func__,
			hdmi_msm_state->cec_queue_wr->recvr_id,
			hdmi_msm_state->cec_queue_wr->sender_id);
		return;
	}

	/* data block 0 : opcode */
	data = HDMI_INP(0x02AC);
	hdmi_msm_state->cec_queue_wr->opcode = data & 0xFF;

	/* data block 1-14 : operand 0-13 */
	for (i = 0; i < hdmi_msm_state->cec_queue_wr->frame_size - 2; i++) {
		data = HDMI_INP(0x02AC);
		hdmi_msm_state->cec_queue_wr->operand[i] = data & 0xFF;
	}
	for (; i < 14; i++)
		hdmi_msm_state->cec_queue_wr->operand[i] = 0;

	DEV_DBG("CEC read frame done\n");
	DEV_DBG("=======================================\n");
	hdmi_msm_dump_cec_msg(hdmi_msm_state->cec_queue_wr);
	DEV_DBG("=======================================\n");

#ifdef DRVR_ONLY_CECT_NO_DAEMON
	/* compliance-test mode: answer a subset of opcodes in-kernel */
	switch (hdmi_msm_state->cec_queue_wr->opcode) {
	case 0x64:
		/* Set OSD String */
		DEV_INFO("Recvd OSD Str=[%x]\n",\
			hdmi_msm_state->cec_queue_wr->operand[3]);
		break;
	case 0x83:
		/* Give Phy Addr */
		DEV_INFO("Recvd a Give Phy Addr cmd\n");
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		/* Setup a frame for sending out phy addr */
		temp_msg.sender_id = 0x4;
		/* Broadcast */
		temp_msg.recvr_id = 0xf;
		temp_msg.opcode = 0x84;
		i = 0;
		temp_msg.operand[i++] = 0x10;
		temp_msg.operand[i++] = 0x00;
		temp_msg.operand[i++] = 0x04;
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
	case 0xFF:
		/* Abort */
		DEV_INFO("Recvd an abort cmd 0xFF\n");
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
		i = 0;
		/*feature abort */
		temp_msg.opcode = 0x00;
		temp_msg.operand[i++] =
			hdmi_msm_state->cec_queue_wr->opcode;
		/*reason for abort = "Refused" */
		temp_msg.operand[i++] = 0x04;
		temp_msg.frame_size = i + 2;
		hdmi_msm_dump_cec_msg(&temp_msg);
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
	case 0x046:
		/* Give OSD name */
		DEV_INFO("Recvd cmd 0x046\n");
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
		i = 0;
		/* OSD Name */
		temp_msg.opcode = 0x47;
		/* Display control byte */
		temp_msg.operand[i++] = 0x00;
		temp_msg.operand[i++] = 'H';
		temp_msg.operand[i++] = 'e';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'o';
		temp_msg.operand[i++] = ' ';
		temp_msg.operand[i++] = 'W';
		temp_msg.operand[i++] = 'o';
		temp_msg.operand[i++] = 'r';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'd';
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
	case 0x08F:
		/* Give Device Power status */
		DEV_INFO("Recvd a Power status message\n");
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
		i = 0;
		/* OSD String */
		temp_msg.opcode = 0x90;
		temp_msg.operand[i++] = 'H';
		temp_msg.operand[i++] = 'e';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'o';
		temp_msg.operand[i++] = ' ';
		temp_msg.operand[i++] = 'W';
		temp_msg.operand[i++] = 'o';
		temp_msg.operand[i++] = 'r';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'd';
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
	case 0x080:
		/* Routing Change cmd */
	case 0x086:
		/* Set Stream Path */
		DEV_INFO("Recvd Set Stream\n");
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		/*Broadcast this message*/
		temp_msg.recvr_id = 0xf;
		i = 0;
		temp_msg.opcode = 0x82; /* Active Source */
		temp_msg.operand[i++] = 0x10;
		temp_msg.operand[i++] = 0x00;
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);

		/*
		 * sending <Image View On> message
		 */
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
		i = 0;
		/* opcode for Image View On */
		temp_msg.opcode = 0x04;
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
	case 0x44:
		/* User Control Pressed */
		DEV_INFO("User Control Pressed\n");
		break;
	case 0x45:
		/* User Control Released */
		DEV_INFO("User Control Released\n");
		break;
	default:
		DEV_INFO("Recvd an unknown cmd = [%u]\n",
			hdmi_msm_state->cec_queue_wr->opcode);
#ifdef __SEND_ABORT__
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
		i = 0;
		/* opcode for feature abort */
		temp_msg.opcode = 0x00;
		temp_msg.operand[i++] =
			hdmi_msm_state->cec_queue_wr->opcode;
		/*reason for abort = "Unrecognized opcode" */
		temp_msg.operand[i++] = 0x00;
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
#else
		memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
		temp_msg.sender_id = 0x4;
		temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
		i = 0;
		/* OSD String */
		temp_msg.opcode = 0x64;
		temp_msg.operand[i++] = 0x0;
		temp_msg.operand[i++] = 'H';
		temp_msg.operand[i++] = 'e';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'o';
		temp_msg.operand[i++] = ' ';
		temp_msg.operand[i++] = 'W';
		temp_msg.operand[i++] = 'o';
		temp_msg.operand[i++] = 'r';
		temp_msg.operand[i++] = 'l';
		temp_msg.operand[i++] = 'd';
		temp_msg.frame_size = i + 2;
		hdmi_msm_cec_msg_send(&temp_msg);
		break;
#endif /* __SEND_ABORT__ */
	}
#endif /* DRVR_ONLY_CECT_NO_DAEMON */

	/* advance the ring-buffer write pointer, wrapping at the end */
	mutex_lock(&hdmi_msm_state_mutex);
	hdmi_msm_state->cec_queue_wr++;
	if (hdmi_msm_state->cec_queue_wr == CEC_QUEUE_END)
		hdmi_msm_state->cec_queue_wr = hdmi_msm_state->cec_queue_start;
	if (hdmi_msm_state->cec_queue_wr == hdmi_msm_state->cec_queue_rd)
		hdmi_msm_state->cec_queue_full = true;
	mutex_unlock(&hdmi_msm_state_mutex);
	DEV_DBG("Exiting %s()\n", __func__);
}
/*
 * CEC "One Touch Play": broadcast <Active Source> with physical address
 * 1.0.0.0, then send <Image View On>.
 */
void hdmi_msm_cec_one_touch_play(void)
{
	struct hdmi_msm_cec_msg temp_msg;
	uint32 i = 0;
	memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
	temp_msg.sender_id = 0x4;
	/*
	 * Broadcast this message
	 */
	temp_msg.recvr_id = 0xf;
	i = 0;
	/* Active Source */
	temp_msg.opcode = 0x82;
	temp_msg.operand[i++] = 0x10;
	temp_msg.operand[i++] = 0x00;
	/*temp_msg.operand[i++] = 0x04;*/
	temp_msg.frame_size = i + 2;
	hdmi_msm_cec_msg_send(&temp_msg);
	/*
	 * sending <Image View On> message
	 */
	memset(&temp_msg, 0x00, sizeof(struct hdmi_msm_cec_msg));
	temp_msg.sender_id = 0x4;
	/*
	 * NOTE(review): cec_queue_wr points at the NEXT free queue slot, so
	 * sender_id read here is likely stale/uninitialized data -- confirm
	 * the intended destination for <Image View On>.
	 */
	temp_msg.recvr_id = hdmi_msm_state->cec_queue_wr->sender_id;
	i = 0;
	/* Image View On */
	temp_msg.opcode = 0x04;
	temp_msg.frame_size = i + 2;
	hdmi_msm_cec_msg_send(&temp_msg);
}
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */
/* Return the HDMI register block base address for other drivers. */
uint32 hdmi_msm_get_io_base(void)
{
	return (uint32)MSM_HDMI_BASE;
}
EXPORT_SYMBOL(hdmi_msm_get_io_base);
/* Table indicating the video format supported by the HDMI TX Core v1.0 */
/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */

/* Register every CEA video timing this TX core supports in the mode LUT. */
static void hdmi_msm_setup_video_mode_lut(void)
{
	HDMI_SETUP_LUT(640x480p60_4_3);
	HDMI_SETUP_LUT(720x480p60_4_3);
	HDMI_SETUP_LUT(720x480p60_16_9);
	HDMI_SETUP_LUT(1280x720p60_16_9);
	HDMI_SETUP_LUT(1920x1080i60_16_9);
	HDMI_SETUP_LUT(1440x480i60_4_3);
	HDMI_SETUP_LUT(1440x480i60_16_9);
	HDMI_SETUP_LUT(1920x1080p60_16_9);
	HDMI_SETUP_LUT(720x576p50_4_3);
	HDMI_SETUP_LUT(720x576p50_16_9);
	HDMI_SETUP_LUT(1280x720p50_16_9);
	HDMI_SETUP_LUT(1440x576i50_4_3);
	HDMI_SETUP_LUT(1440x576i50_16_9);
	HDMI_SETUP_LUT(1920x1080p50_16_9);
	HDMI_SETUP_LUT(1920x1080p24_16_9);
	HDMI_SETUP_LUT(1920x1080p25_16_9);
	HDMI_SETUP_LUT(1920x1080p30_16_9);
}
#ifdef PORT_DEBUG
/*
 * Map an HDMI register offset to a human-readable name for the register
 * access tracing done by hdmi_outp()/hdmi_inp() (PORT_DEBUG builds only).
 * Unknown offsets yield "???".
 */
const char *hdmi_msm_name(uint32 offset)
{
	switch (offset) {
	case 0x0000: return "CTRL";
	case 0x0020: return "AUDIO_PKT_CTRL1";
	case 0x0024: return "ACR_PKT_CTRL";
	case 0x0028: return "VBI_PKT_CTRL";
	case 0x002C: return "INFOFRAME_CTRL0";
#ifdef CONFIG_FB_MSM_HDMI_3D
	case 0x0034: return "GEN_PKT_CTRL";
#endif
	case 0x003C: return "ACP";
	case 0x0040: return "GC";
	case 0x0044: return "AUDIO_PKT_CTRL2";
	case 0x0048: return "ISRC1_0";
	case 0x004C: return "ISRC1_1";
	case 0x0050: return "ISRC1_2";
	case 0x0054: return "ISRC1_3";
	case 0x0058: return "ISRC1_4";
	case 0x005C: return "ISRC2_0";
	case 0x0060: return "ISRC2_1";
	case 0x0064: return "ISRC2_2";
	case 0x0068: return "ISRC2_3";
	case 0x006C: return "AVI_INFO0";
	case 0x0070: return "AVI_INFO1";
	case 0x0074: return "AVI_INFO2";
	case 0x0078: return "AVI_INFO3";
#ifdef CONFIG_FB_MSM_HDMI_3D
	case 0x0084: return "GENERIC0_HDR";
	case 0x0088: return "GENERIC0_0";
	case 0x008C: return "GENERIC0_1";
#endif
	case 0x00C4: return "ACR_32_0";
	case 0x00C8: return "ACR_32_1";
	case 0x00CC: return "ACR_44_0";
	case 0x00D0: return "ACR_44_1";
	case 0x00D4: return "ACR_48_0";
	case 0x00D8: return "ACR_48_1";
	case 0x00E4: return "AUDIO_INFO0";
	case 0x00E8: return "AUDIO_INFO1";
	case 0x0110: return "HDCP_CTRL";
	case 0x0114: return "HDCP_DEBUG_CTRL";
	case 0x0118: return "HDCP_INT_CTRL";
	case 0x011C: return "HDCP_LINK0_STATUS";
	case 0x012C: return "HDCP_ENTROPY_CTRL0";
	case 0x0130: return "HDCP_RESET";
	case 0x0134: return "HDCP_RCVPORT_DATA0";
	case 0x0138: return "HDCP_RCVPORT_DATA1";
	case 0x013C: return "HDCP_RCVPORT_DATA2";
	case 0x0144: return "HDCP_RCVPORT_DATA3";
	case 0x0148: return "HDCP_RCVPORT_DATA4";
	case 0x014C: return "HDCP_RCVPORT_DATA5";
	case 0x0150: return "HDCP_RCVPORT_DATA6";
	case 0x0168: return "HDCP_RCVPORT_DATA12";
	case 0x01D0: return "AUDIO_CFG";
	case 0x0208: return "USEC_REFTIMER";
	case 0x020C: return "DDC_CTRL";
	case 0x0214: return "DDC_INT_CTRL";
	case 0x0218: return "DDC_SW_STATUS";
	case 0x021C: return "DDC_HW_STATUS";
	case 0x0220: return "DDC_SPEED";
	case 0x0224: return "DDC_SETUP";
	case 0x0228: return "DDC_TRANS0";
	case 0x022C: return "DDC_TRANS1";
	case 0x0238: return "DDC_DATA";
	case 0x0250: return "HPD_INT_STATUS";
	case 0x0254: return "HPD_INT_CTRL";
	case 0x0258: return "HPD_CTRL";
	case 0x025C: return "HDCP_ENTROPY_CTRL1";
	case 0x027C: return "DDC_REF";
	case 0x0284: return "HDCP_SW_UPPER_AKSV";
	case 0x0288: return "HDCP_SW_LOWER_AKSV";
	case 0x02B4: return "ACTIVE_H";
	case 0x02B8: return "ACTIVE_V";
	case 0x02BC: return "ACTIVE_V_F2";
	case 0x02C0: return "TOTAL";
	case 0x02C4: return "V_TOTAL_F2";
	case 0x02C8: return "FRAME_CTRL";
	case 0x02CC: return "AUD_INT";
	case 0x0300: return "PHY_REG0";
	case 0x0304: return "PHY_REG1";
	case 0x0308: return "PHY_REG2";
	case 0x030C: return "PHY_REG3";
	case 0x0310: return "PHY_REG4";
	case 0x0314: return "PHY_REG5";
	case 0x0318: return "PHY_REG6";
	case 0x031C: return "PHY_REG7";
	case 0x0320: return "PHY_REG8";
	case 0x0324: return "PHY_REG9";
	case 0x0328: return "PHY_REG10";
	case 0x032C: return "PHY_REG11";
	case 0x0330: return "PHY_REG12";
	default: return "???";
	}
}
/*
 * Traced register write: write 'value' to the HDMI register at 'offset',
 * read it back, and log written value, readback and register name.
 */
void hdmi_outp(uint32 offset, uint32 value)
{
	uint32 readback;

	outpdw(MSM_HDMI_BASE+offset, value);
	readback = inpdw(MSM_HDMI_BASE+offset);
	DEV_DBG("HDMI[%04x] => %08x [%08x] %s\n",
			offset, value, readback, hdmi_msm_name(offset));
}
/*
 * Traced register read: read the HDMI register at 'offset', log the
 * value and register name, and return the value.
 */
uint32 hdmi_inp(uint32 offset)
{
	const uint32 value = inpdw(MSM_HDMI_BASE+offset);

	DEV_DBG("HDMI[%04x] <= %08x %s\n",
			offset, value, hdmi_msm_name(offset));
	return value;
}
#endif /* PORT_DEBUG */
static void hdmi_msm_turn_on(void);
static int hdmi_msm_audio_off(void);
static int hdmi_msm_read_edid(void);
static void hdmi_msm_hpd_off(void);
/*
 * True once probe has fully succeeded: register base mapped, driver
 * state allocated, app clock obtained and HPD machinery initialized.
 */
static bool hdmi_ready(void)
{
	if (!MSM_HDMI_BASE)
		return false;
	if (!hdmi_msm_state || !hdmi_msm_state->hdmi_app_clk)
		return false;
	return hdmi_msm_state->hpd_initialized;
}
/*
 * Notify userspace of an HPD (hot-plug) change.  Unconditionally emits an
 * HDCP_STATE=FAIL change uevent first so the audio DSP is stopped before
 * the transition.  On connect ('on' true): rebuild the EDID table, flip
 * the switch device to 1 and send KOBJ_ONLINE (plus HDCP_STATE=PASS when
 * HDCP is disabled, for HDMI compliance audio).  On disconnect: flip the
 * switch to 0 and send KOBJ_OFFLINE.
 */
static void hdmi_msm_send_event(boolean on)
{
	char *envp[2];

	/* QDSP OFF preceding the HPD event notification */
	envp[0] = "HDCP_STATE=FAIL";
	envp[1] = NULL;
	DEV_ERR("hdmi: HDMI HPD: QDSP OFF\n");
	kobject_uevent_env(external_common_state->uevent_kobj,
			   KOBJ_CHANGE, envp);
	if (on) {
		/* Build EDID table */
		hdmi_msm_read_edid();
		switch_set_state(&external_common_state->sdev, 1);
		DEV_INFO("%s: hdmi state switched to %d\n", __func__,
			 external_common_state->sdev.state);
		DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
		kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE);
		if (!hdmi_msm_state->hdcp_enable) {
			/* Send Audio for HDMI Compliance Cases */
			envp[0] = "HDCP_STATE=PASS";
			envp[1] = NULL;
			DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
			kobject_uevent_env(external_common_state->uevent_kobj,
					   KOBJ_CHANGE, envp);
		}
	} else {
		switch_set_state(&external_common_state->sdev, 0);
		DEV_INFO("%s: hdmi state switch to %d\n", __func__,
			 external_common_state->sdev.state);
		DEV_INFO("hdmi: HDMI HPD: sense DISCONNECTED: send OFFLINE\n");
		kobject_uevent(external_common_state->uevent_kobj,
			       KOBJ_OFFLINE);
	}
	/*[ECID:000000] ZTEBSP wanghaifei start 20130221, add qcom new patch for HDP resume wait*/
	// if (!completion_done(&hdmi_msm_state->hpd_event_processed))
	// complete(&hdmi_msm_state->hpd_event_processed);
	/*[ECID:000000] ZTEBSP wanghaifei end 20130221, add qcom new patch for HDP resume wait*/
}
/*
 * Worker: deliver the HPD connect/disconnect event that the ISR recorded
 * in external_common_state->hpd_state; ignored while probe is incomplete.
 */
static void hdmi_msm_hpd_state_work(struct work_struct *work)
{
	if (hdmi_ready())
		hdmi_msm_send_event(external_common_state->hpd_state);
	else
		DEV_ERR("hdmi: %s: ignored, probe failed\n", __func__);
}
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
/* Worker: delegates to hdmi_msm_cec_line_latch_detect() (CEC line check). */
static void hdmi_msm_cec_latch_work(struct work_struct *work)
{
	hdmi_msm_cec_line_latch_detect();
}
#endif
static void hdcp_deauthenticate(void);
/*
 * Worker: tear down the current HDCP session and schedule a
 * re-authentication attempt via hdcp_timer in HZ/2 jiffies.
 * No-op if HDCP is disabled or an authentication is already in flight.
 */
static void hdmi_msm_hdcp_reauth_work(struct work_struct *work)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}
	/* Don't process recursive actions */
	mutex_lock(&hdmi_msm_state_mutex);
	if (hdmi_msm_state->hdcp_activating) {
		mutex_unlock(&hdmi_msm_state_mutex);
		return;
	}
	mutex_unlock(&hdmi_msm_state_mutex);
	/*
	 * Reauth=>deauth, hdcp_auth
	 * hdcp_auth=>turn_on() which calls
	 * HDMI Core reset without informing the Audio QDSP
	 * this can do bad things to video playback on the HDTV
	 * Therefore, as surprising as it may sound do reauth
	 * only if the device is HDCP-capable
	 */
	hdcp_deauthenticate();
	/* flag the deferred hdcp work (hdmi_msm_hdcp_work) to re-auth */
	mutex_lock(&hdcp_auth_state_mutex);
	hdmi_msm_state->reauth = TRUE;
	mutex_unlock(&hdcp_auth_state_mutex);
	mod_timer(&hdmi_msm_state->hdcp_timer, jiffies + HZ/2);
}
/*
 * Worker (fired by hdcp_timer): kick off HDCP authentication.  Runs only
 * while the cable is still connected and full authentication has not yet
 * completed; takes the reauth path (full hdmi_msm_turn_on()) when the
 * reauth flag was set by hdmi_msm_hdcp_reauth_work().
 */
static void hdmi_msm_hdcp_work(struct work_struct *work)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}
	/* Only re-enable if cable still connected */
	mutex_lock(&external_common_state_hpd_mutex);
	if (external_common_state->hpd_state &&
	    !(hdmi_msm_state->full_auth_done)) {
		mutex_unlock(&external_common_state_hpd_mutex);
		if (hdmi_msm_state->reauth == TRUE) {
			DEV_DBG("%s: Starting HDCP re-authentication\n",
				__func__);
			hdmi_msm_turn_on();
		} else {
			DEV_DBG("%s: Starting HDCP authentication\n", __func__);
			hdmi_msm_hdcp_enable();
		}
	} else {
		mutex_unlock(&external_common_state_hpd_mutex);
		DEV_DBG("%s: HDMI not connected or HDCP already active\n",
			__func__);
		hdmi_msm_state->reauth = FALSE;
	}
}
/*
 * Decode and acknowledge pending HDCP interrupts in HDCP_INT_CTRL[0x0118].
 * Returns 0 when an HDCP interrupt was handled, -EINVAL when HDCP is
 * disabled, and -1 when no enabled HDCP interrupt was pending (so the
 * caller's ISR can continue checking other sources).
 */
int hdmi_msm_process_hdcp_interrupts(void)
{
	int rc = -1;
	uint32 hdcp_int_val;
	char *envp[2];

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return -EINVAL;
	}
	/* HDCP_INT_CTRL[0x0118]
	 *    [0] AUTH_SUCCESS_INT	[R]	HDCP Authentication Success
	 *		interrupt status
	 *    [1] AUTH_SUCCESS_ACK	[W]	Acknowledge bit for HDCP
	 *		Authentication Success bit - write 1 to clear
	 *    [2] AUTH_SUCCESS_MASK	[R/W]	Mask bit for HDCP Authentication
	 *		Success interrupt - set to 1 to enable interrupt */
	hdcp_int_val = HDMI_INP_ND(0x0118);
	if ((hdcp_int_val & (1 << 2)) && (hdcp_int_val & (1 << 0))) {
		/* AUTH_SUCCESS_INT */
		/* Ack success and clear the status bit */
		HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 1)) & ~(1 << 0));
		DEV_INFO("HDCP: AUTH_SUCCESS_INT received\n");
		complete_all(&hdmi_msm_state->hdcp_success_done);
		return 0;
	}
	/*    [4] AUTH_FAIL_INT		[R]	HDCP Authentication Lost
	 *		interrupt Status
	 *    [5] AUTH_FAIL_ACK		[W]	Acknowledge bit for HDCP
	 *		Authentication Lost bit - write 1 to clear
	 *    [6] AUTH_FAIL_MASK	[R/W]	Mask bit for HDCP Authentication
	 *		Lost interrupt set to 1 to enable interrupt
	 *    [7] AUTH_FAIL_INFO_ACK	[W]	Acknowledge bit for HDCP
	 *		Authentication Failure Info field - write 1 to clear */
	if ((hdcp_int_val & (1 << 6)) && (hdcp_int_val & (1 << 4))) {
		/* AUTH_FAIL_INT */
		/* Clear and Disable */
		uint32 link_status = HDMI_INP_ND(0x011C);
		HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 5))
			& ~((1 << 6) | (1 << 4)));
		DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
			link_status);
		if (hdmi_msm_state->full_auth_done) {
			/* Tell userspace/audio that HDCP is gone */
			SWITCH_SET_HDMI_AUDIO(0, 0);
			envp[0] = "HDCP_STATE=FAIL";
			envp[1] = NULL;
			DEV_INFO("HDMI HPD:QDSP OFF\n");
			kobject_uevent_env(external_common_state->uevent_kobj,
				KOBJ_CHANGE, envp);
			mutex_lock(&hdcp_auth_state_mutex);
			hdmi_msm_state->full_auth_done = FALSE;
			mutex_unlock(&hdcp_auth_state_mutex);
			/* Calling reauth only when authentication
			 * is successful or else we always go into
			 * the reauth loop. Also, No need to reauthenticate
			 * if authentication failed because of cable disconnect
			 */
			if (((link_status & 0xF0) >> 4) != 0x7) {
				DEV_DBG("Reauthenticate From %s HDCP FAIL INT ",
					__func__);
				queue_work(hdmi_work_queue,
					&hdmi_msm_state->hdcp_reauth_work);
			} else {
				DEV_INFO("HDCP: HDMI cable disconnected\n");
			}
		}
		/* Clear AUTH_FAIL_INFO as well */
		HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 7)));
		return 0;
	}
	/*    [8] DDC_XFER_REQ_INT	[R]	HDCP DDC Transfer Request
	 *		interrupt status
	 *    [9] DDC_XFER_REQ_ACK	[W]	Acknowledge bit for HDCP DDC
	 *		Transfer Request bit - write 1 to clear
	 *   [10] DDC_XFER_REQ_MASK	[R/W]	Mask bit for HDCP DDC Transfer
	 *		Request interrupt - set to 1 to enable interrupt */
	if ((hdcp_int_val & (1 << 10)) && (hdcp_int_val & (1 << 8))) {
		/* DDC_XFER_REQ_INT */
		HDMI_OUTP_ND(0x0118, (hdcp_int_val | (1 << 9)) & ~(1 << 8));
		/* fall through to DDC_XFER_DONE only if it is also pending */
		if (!(hdcp_int_val & (1 << 12)))
			return 0;
	}
	/*   [12] DDC_XFER_DONE_INT	[R]	HDCP DDC Transfer done interrupt
	 *		status
	 *   [13] DDC_XFER_DONE_ACK	[W]	Acknowledge bit for HDCP DDC
	 *		Transfer done bit - write 1 to clear
	 *   [14] DDC_XFER_DONE_MASK	[R/W]	Mask bit for HDCP DDC Transfer
	 *		done interrupt - set to 1 to enable interrupt */
	if ((hdcp_int_val & (1 << 14)) && (hdcp_int_val & (1 << 12))) {
		/* DDC_XFER_DONE_INT */
		HDMI_OUTP_ND(0x0118, (hdcp_int_val | (1 << 13)) & ~(1 << 12));
		DEV_INFO("HDCP: DDC_XFER_DONE received\n");
		return 0;
	}
	return rc;
}
/*
 * Top-level HDMI interrupt handler.  Checks sources in priority order --
 * HPD, DDC SW_DONE, audio FIFO underrun, audio sample drop, HDCP, then
 * (when configured) CEC -- acknowledging and handling the first pending
 * one per invocation.  Always returns IRQ_HANDLED.
 */
static irqreturn_t hdmi_msm_isr(int irq, void *dev_id)
{
	uint32 hpd_int_status;
	uint32 hpd_int_ctrl;
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
	uint32 cec_intr_status;
#endif
	uint32 ddc_int_ctrl;
	uint32 audio_int_val;
	/* per-boot counters used to throttle noisy audio interrupts */
	static uint32 fifo_urun_int_occurred;
	static uint32 sample_drop_int_occurred;
	const uint32 occurrence_limit = 5;

	if (!hdmi_ready()) {
		DEV_DBG("ISR ignored, probe failed\n");
		return IRQ_HANDLED;
	}
	/* Process HPD Interrupt */
	/* HDMI_HPD_INT_STATUS[0x0250] */
	hpd_int_status = HDMI_INP_ND(0x0250);
	/* HDMI_HPD_INT_CTRL[0x0254] */
	hpd_int_ctrl = HDMI_INP_ND(0x0254);
	if ((hpd_int_ctrl & (1 << 2)) && (hpd_int_status & (1 << 0))) {
		/*
		 * Got HPD interrupt. Ack the interrupt and disable any
		 * further HPD interrupts until we process this interrupt.
		 */
		HDMI_OUTP(0x0254, ((hpd_int_ctrl | (BIT(0))) & ~BIT(2)));
		/* cable sense level lives in bit 1 of HPD_INT_STATUS */
		external_common_state->hpd_state =
			(HDMI_INP(0x0250) & BIT(1)) >> 1;
		DEV_DBG("%s: Queuing work to handle HPD %s event\n", __func__,
			external_common_state->hpd_state ? "connect" :
			"disconnect");
		queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_state_work);
		return IRQ_HANDLED;
	}
	/* Process DDC Interrupts */
	/* HDMI_DDC_INT_CTRL[0x0214] */
	ddc_int_ctrl = HDMI_INP_ND(0x0214);
	if ((ddc_int_ctrl & (1 << 2)) && (ddc_int_ctrl & (1 << 0))) {
		/* SW_DONE INT occurred, clr it */
		HDMI_OUTP_ND(0x0214, ddc_int_ctrl | (1 << 1));
		/* wake the waiter in the DDC read/write paths */
		complete(&hdmi_msm_state->ddc_sw_done);
		return IRQ_HANDLED;
	}
	/* FIFO Underrun Int is enabled */
	/* HDMI_AUD_INT[0x02CC]
	 *   [3] AUD_SAM_DROP_MASK [R/W]
	 *   [2] AUD_SAM_DROP_ACK [W], AUD_SAM_DROP_INT [R]
	 *   [1] AUD_FIFO_URUN_MASK [R/W]
	 *   [0] AUD_FIFO_URUN_ACK [W], AUD_FIFO_URUN_INT [R] */
	audio_int_val = HDMI_INP_ND(0x02CC);
	if ((audio_int_val & (1 << 1)) && (audio_int_val & (1 << 0))) {
		/* FIFO Underrun occurred, clr it */
		HDMI_OUTP(0x02CC, audio_int_val | (1 << 0));
		++fifo_urun_int_occurred;
		DEV_INFO("HDMI AUD_FIFO_URUN: %d\n", fifo_urun_int_occurred);
		/* mask the interrupt after too many occurrences */
		if (fifo_urun_int_occurred >= occurrence_limit) {
			HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) & ~(1 << 1));
			DEV_INFO("HDMI AUD_FIFO_URUN: INT has been disabled "
				"by the ISR after %d occurences...\n",
				fifo_urun_int_occurred);
		}
		return IRQ_HANDLED;
	}
	/* Audio Sample Drop int is enabled */
	if ((audio_int_val & (1 << 3)) && (audio_int_val & (1 << 2))) {
		/* Audio Sample Drop occurred, clr it */
		HDMI_OUTP(0x02CC, audio_int_val | (1 << 2));
		DEV_DBG("%s: AUD_SAM_DROP", __func__);
		++sample_drop_int_occurred;
		if (sample_drop_int_occurred >= occurrence_limit) {
			HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) & ~(1 << 3));
			DEV_INFO("HDMI AUD_SAM_DROP: INT has been disabled "
				"by the ISR after %d occurences...\n",
				sample_drop_int_occurred);
		}
		return IRQ_HANDLED;
	}
	/* returns 0 when an HDCP interrupt was handled */
	if (!hdmi_msm_process_hdcp_interrupts())
		return IRQ_HANDLED;
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
	/* Process CEC Interrupt */
	/* HDMI_MSM_CEC_INT[0x029C] */
	cec_intr_status = HDMI_INP_ND(0x029C);
	DEV_DBG("cec interrupt status is [%u]\n", cec_intr_status);
	if (HDMI_MSM_CEC_FRAME_WR_SUCCESS(cec_intr_status)) {
		DEV_DBG("CEC_IRQ_FRAME_WR_DONE\n");
		HDMI_OUTP(0x029C, cec_intr_status |
			HDMI_MSM_CEC_INT_FRAME_WR_DONE_ACK);
		mutex_lock(&hdmi_msm_state_mutex);
		hdmi_msm_state->cec_frame_wr_status |= CEC_STATUS_WR_DONE;
		hdmi_msm_state->first_monitor = 0;
		del_timer(&hdmi_msm_state->cec_read_timer);
		mutex_unlock(&hdmi_msm_state_mutex);
		complete(&hdmi_msm_state->cec_frame_wr_done);
		return IRQ_HANDLED;
	}
	if ((cec_intr_status & (1 << 2)) && (cec_intr_status & (1 << 3))) {
		DEV_DBG("CEC_IRQ_FRAME_ERROR\n");
#ifdef TOGGLE_CEC_HARDWARE_FSM
		/* Toggle CEC hardware FSM */
		HDMI_OUTP(0x028C, 0x0);
		HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
#endif
		HDMI_OUTP(0x029C, cec_intr_status);
		mutex_lock(&hdmi_msm_state_mutex);
		hdmi_msm_state->first_monitor = 0;
		del_timer(&hdmi_msm_state->cec_read_timer);
		hdmi_msm_state->cec_frame_wr_status |= CEC_STATUS_WR_ERROR;
		mutex_unlock(&hdmi_msm_state_mutex);
		complete(&hdmi_msm_state->cec_frame_wr_done);
		return IRQ_HANDLED;
	}
	if ((cec_intr_status & (1 << 4)) && (cec_intr_status & (1 << 5))) {
		DEV_DBG("CEC_IRQ_MONITOR\n");
		HDMI_OUTP(0x029C, cec_intr_status |
			  HDMI_MSM_CEC_INT_MONITOR_ACK);
		/*
		 * CECT 9-5-1
		 * On the first occasion start a timer
		 * for few hundred ms, if it expires then
		 * reset the CEC block else go on with
		 * frame transactions as usual.
		 * Below adds hdmi_msm_cec_msg_recv() as an
		 * item into the work queue instead of running in
		 * interrupt context
		 */
		mutex_lock(&hdmi_msm_state_mutex);
		if (hdmi_msm_state->first_monitor == 0) {
			/* This timer might have to be changed
			 * worst case theoretical =
			 * 16 bytes * 8 * 2.7msec = 346 msec
			 */
			mod_timer(&hdmi_msm_state->cec_read_timer,
					jiffies + HZ/2);
			hdmi_msm_state->first_monitor = 1;
		}
		mutex_unlock(&hdmi_msm_state_mutex);
		return IRQ_HANDLED;
	}
	if ((cec_intr_status & (1 << 6)) && (cec_intr_status & (1 << 7))) {
		DEV_DBG("CEC_IRQ_FRAME_RD_DONE\n");
		mutex_lock(&hdmi_msm_state_mutex);
		hdmi_msm_state->first_monitor = 0;
		del_timer(&hdmi_msm_state->cec_read_timer);
		mutex_unlock(&hdmi_msm_state_mutex);
		HDMI_OUTP(0x029C, cec_intr_status |
			HDMI_MSM_CEC_INT_FRAME_RD_DONE_ACK);
		hdmi_msm_cec_msg_recv();
#ifdef TOGGLE_CEC_HARDWARE_FSM
		if (!msg_send_complete)
			msg_recv_complete = FALSE;
		else {
			/* Toggle CEC hardware FSM */
			HDMI_OUTP(0x028C, 0x0);
			HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
		}
#else
		HDMI_OUTP(0x028C, 0x0);
		HDMI_OUTP(0x028C, HDMI_MSM_CEC_CTRL_ENABLE);
#endif
		return IRQ_HANDLED;
	}
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */
	/* nothing we recognize was pending; dump the raw state for debug */
	DEV_DBG("%s: HPD<Ctrl=%04x, State=%04x>, ddc_int_ctrl=%04x, "
		"aud_int=%04x, cec_intr_status=%04x\n", __func__, hpd_int_ctrl,
		hpd_int_status, ddc_int_ctrl, audio_int_val,
		HDMI_INP_ND(0x029C));
	return IRQ_HANDLED;
}
/*
 * Read the QFPROM feature fuses and report whether the HDMI/HDCP blocks
 * are usable.  Returns 0 when HDMI is available, -ENODEV when the HDMI
 * block is fused off; an HDCP-only fuse merely logs a warning.
 */
static int check_hdmi_features(void)
{
	/* RAW_FEAT_CONFIG_ROW0_LSB */
	const uint32 fuse = inpdw(QFPROM_BASE + 0x0238);
	/* HDMI_DISABLE */
	const boolean hdmi_disabled = (fuse & 0x00200000) >> 21;
	/* HDCP_DISABLE */
	const boolean hdcp_disabled = (fuse & 0x00400000) >> 22;

	DEV_DBG("Features <val:0x%08x, HDMI:%s, HDCP:%s>\n", fuse,
		hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? "OFF" : "ON");

	if (hdmi_disabled) {
		DEV_ERR("ERROR: HDMI disabled\n");
		return -ENODEV;
	}
	if (hdcp_disabled)
		DEV_WARN("WARNING: HDCP disabled\n");
	return 0;
}
static boolean hdmi_msm_has_hdcp(void)
{
/* RAW_FEAT_CONFIG_ROW0_LSB, HDCP_DISABLE */
return (inpdw(QFPROM_BASE + 0x0238) & 0x00400000) ? FALSE : TRUE;
}
static boolean hdmi_msm_is_power_on(void)
{
/* HDMI_CTRL, ENABLE */
return (HDMI_INP_ND(0x0000) & 0x00000001) ? TRUE : FALSE;
}
/* 1.2.1.2.1 DVI Operation
* HDMI compliance requires the HDMI core to support DVI as well. The
* HDMI core also supports DVI. In DVI operation there are no preambles
* and guardbands transmitted. THe TMDS encoding of video data remains
* the same as HDMI. There are no VBI or audio packets transmitted. In
* order to enable DVI mode in HDMI core, HDMI_DVI_SEL field of
* HDMI_CTRL register needs to be programmed to 0. */
static boolean hdmi_msm_is_dvi_mode(void)
{
/* HDMI_CTRL, HDMI_DVI_SEL */
return (HDMI_INP_ND(0x0000) & 0x00000002) ? FALSE : TRUE;
}
/*
 * Program HDMI_CTRL[0x0000]: bit0 = ENABLE, bit1 = HDMI_DVI_SEL
 * (1 = HDMI, 0 = DVI, see DVI note above), bit2 = HDMI encryption.
 * When powering on for a DVI sink, the register is written once with
 * HDMI mode (+encryption if HDCP is on) and then written again with the
 * DVI_SEL bit cleared, leaving the core in DVI mode.
 */
void hdmi_msm_set_mode(boolean power_on)
{
	uint32 reg_val = 0;
	if (power_on) {
		/* ENABLE */
		reg_val |= 0x00000001; /* Enable the block */
		if (external_common_state->hdmi_sink == 0) {
			/* HDMI_DVI_SEL */
			reg_val |= 0x00000002;
			if (hdmi_msm_state->hdcp_enable)
				/* HDMI Encryption */
				reg_val |= 0x00000004;
			/* HDMI_CTRL -- intermediate write in HDMI mode */
			HDMI_OUTP(0x0000, reg_val);
			/* HDMI_DVI_SEL -- drop to DVI for the final write */
			reg_val &= ~0x00000002;
		} else {
			if (hdmi_msm_state->hdcp_enable)
				/* HDMI_Encryption_ON */
				reg_val |= 0x00000006;
			else
				reg_val |= 0x00000002;
		}
	} else
		/* disabled: DVI_SEL only, ENABLE bit clear */
		reg_val = 0x00000002;
	/* HDMI_CTRL */
	HDMI_OUTP(0x0000, reg_val);
	DEV_DBG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
		power_on ? "Enable" : "Disable", reg_val);
}
/*
 * One-time setup of the DDC (I2C-over-HDMI) engine: clock prescale and
 * sampling threshold, stall timeout, and the HDCP reference timer.
 */
static void msm_hdmi_init_ddc(void)
{
	/* 0x0220 HDMI_DDC_SPEED
	   [31:16] PRESCALE prescale = (m * xtal_frequency) /
		(desired_i2c_speed), where m is multiply
		factor, default: m = 1
	   [1:0]   THRESHOLD Select threshold to use to determine whether value
		sampled on SDA is a 1 or 0. Specified in terms of the ratio
		between the number of sampled ones and the total number of times
		SDA is sampled.
		* 0x0: >0
		* 0x1: 1/4 of total samples
		* 0x2: 1/2 of total samples
		* 0x3: 3/4 of total samples */
	/* Configure the Pre-Scale multiplier
	 * Configure the Threshold */
	HDMI_OUTP_ND(0x0220, (10 << 16) | (2 << 0));

	/*
	 * 0x0224 HDMI_DDC_SETUP
	 * Setting 31:24 bits : Time units to wait before timeout
	 * when clock is being stalled by external sink device
	 */
	HDMI_OUTP_ND(0x0224, 0xff000000);

	/* 0x027C HDMI_DDC_REF
	   [6] REFTIMER_ENABLE	Enable the timer
		* 0: Disable
		* 1: Enable
	   [15:0] REFTIMER	Value to set the register in order to generate
		DDC strobe. This register counts on HDCP application clock */
	/* Enable reference timer
	 * 27 micro-seconds */
	HDMI_OUTP_ND(0x027C, (1 << 16) | (27 << 0));
}
/*
 * Acknowledge any pending DDC SW_DONE interrupt and (re)enable it,
 * polling until the status bit reads back clear.  'what' tags the log
 * message.  Returns 0 on success, -ETIMEDOUT if the bit never clears.
 */
static int hdmi_msm_ddc_clear_irq(const char *what)
{
	uint32 reg_val;
	uint32 remaining = 0xFFFF;

	/* clear pending and enable interrupt */
	do {
		--remaining;
		/* HDMI_DDC_INT_CTRL[0x0214]
		   [2] SW_DONE_MK Mask bit for SW_DONE_INT. Set to 1 to enable
		       interrupt.
		   [1] SW_DONE_ACK WRITE ONLY. Acknowledge bit for SW_DONE_INT.
		       Write 1 to clear interrupt.
		   [0] SW_DONE_INT READ ONLY. SW_DONE interrupt status */
		/* Clear and Enable DDC interrupt */
		HDMI_OUTP_ND(0x0214, (1 << 2) | (1 << 1));
		reg_val = HDMI_INP_ND(0x0214);
	} while ((reg_val & 0x1) && remaining);

	if (remaining)
		return 0;

	DEV_ERR("%s[%s]: timedout\n", __func__, what);
	return -ETIMEDOUT;
}
/*
 * Write 'data_len' bytes from 'data_buf' to DDC (I2C) slave 'dev_addr'
 * at register 'offset', retrying up to 10 times on timeout or NACK.
 * 'what' is a tag used only in log messages.
 * Returns 0 on success, -EINVAL for a NULL buffer, -ETIMEDOUT when the
 * SW_DONE completion never fires, or -EIO on persistent NACK.
 */
static int hdmi_msm_ddc_write(uint32 dev_addr, uint32 offset,
	const uint8 *data_buf, uint32 data_len, const char *what)
{
	uint32 reg_val, ndx;
	int status = 0, retry = 10;
	uint32 time_out_count;
	if (NULL == data_buf) {
		status = -EINVAL;
		DEV_ERR("%s[%s]: invalid input paramter\n", __func__, what);
		goto error;
	}
again:
	status = hdmi_msm_ddc_clear_irq(what);
	if (status)
		goto error;
	/* Ensure Device Address has LSB set to 0 to indicate Slave addr read */
	dev_addr &= 0xFE;
	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
		1 while writing HDMI_DDC_DATA.
	   [23:16] INDEX Use to set index into DDC buffer for next read or
		current write, or to read index of current read or next write.
		Writable only when INDEX_WRITE=1.
	   [15:8] DATA Use to fill or read the DDC buffer
	   [0] DATA_RW Select whether buffer access will be a read or write.
		For writes, address auto-increments on write to HDMI_DDC_DATA.
		For reads, address autoincrements on reads to HDMI_DDC_DATA.
		* 0: Write
		* 1: Read */
	/* 1. Write to HDMI_I2C_DATA with the following fields set in order to
	 *    handle portion #1
	 *    DATA_RW = 0x1 (write)
	 *    DATA = linkAddress (primary link address and writing)
	 *    INDEX = 0x0 (initial offset into buffer)
	 *    INDEX_WRITE = 0x1 (setting initial offset) */
	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (dev_addr << 8));
	/* 2. Write to HDMI_I2C_DATA with the following fields set in order to
	 *    handle portion #2
	 *    DATA_RW = 0x0 (write)
	 *    DATA = offsetAddress
	 *    INDEX = 0x0
	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, offset << 8);
	/* 3. Write to HDMI_I2C_DATA with the following fields set in order to
	 *    handle portion #3
	 *    DATA_RW = 0x0 (write)
	 *    DATA = data_buf[ndx]
	 *    INDEX = 0x0
	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
	for (ndx = 0; ndx < data_len; ++ndx)
		HDMI_OUTP_ND(0x0238, ((uint32)data_buf[ndx]) << 8);
	/* Data setup is complete, now setup the transaction characteristics */
	/* 0x0228 HDMI_DDC_TRANS0
	   [23:16] CNT0 Byte count for first transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP0 Determines whether a stop bit will be sent after the first
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START0 Determines whether a start bit will be sent before the
		first transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK0 Determines whether the current transfer will stop
		if a NACK is received during the first transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW0 Read/write indicator for first transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
	      order to handle characteristics of portion #1 and portion #2
	 *    RW0 = 0x0 (write)
	 *    START0 = 0x1 (insert START bit)
	 *    STOP0 = 0x0 (do NOT insert STOP bit)
	 *    CNT0 = 0x1 (single byte transaction excluding address) */
	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));
	/* 0x022C HDMI_DDC_TRANS1
	   [23:16] CNT1 Byte count for second transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP1 Determines whether a stop bit will be sent after the second
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START1 Determines whether a start bit will be sent before the
		second transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK1 Determines whether the current transfer will stop if
		a NACK is received during the second transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW1 Read/write indicator for second transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
	      order to handle characteristics of portion #3
	 *    RW1 = 0x1 (read)
	 *    START1 = 0x1 (insert START bit)
	 *    STOP1 = 0x1 (insert STOP bit)
	 *    CNT1 = data_len (0xN (write N bytes of data))
	 *    Byte count for second transition (excluding the first
	 *    Byte which is usually the address) */
	HDMI_OUTP_ND(0x022C, (1 << 13) | ((data_len-1) << 16));
	/* Trigger the I2C transfer */
	/* 0x020C HDMI_DDC_CTRL
	   [21:20] TRANSACTION_CNT
		Number of transactions to be done in current transfer.
		* 0x0: transaction0 only
		* 0x1: transaction0, transaction1
		* 0x2: transaction0, transaction1, transaction2
		* 0x3: transaction0, transaction1, transaction2, transaction3
	   [3] SW_STATUS_RESET
		Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
		ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
		STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
	   [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
		data) at start of transfer.  This sequence is sent after GO is
		written to 1, before the first transaction only.
	   [1] SOFT_RESET Write 1 to reset DDC controller
	   [0] GO WRITE ONLY. Write 1 to start DDC transfer. */
	/* 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
	 *    Note that NOTHING has been transmitted on the DDC lines up to this
	 *    point.
	 *    TRANSACTION_CNT = 0x1 (execute transaction0 followed by
	 *    transaction1)
	 *    GO = 0x1 (kicks off hardware) */
	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
	HDMI_OUTP_ND(0x020C, (1 << 0) | (1 << 20));
	/* the ISR completes ddc_sw_done on the SW_DONE interrupt */
	time_out_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->ddc_sw_done, HZ/2);
	/* ack/disable the SW_DONE interrupt again */
	HDMI_OUTP_ND(0x0214, 0x2);
	if (!time_out_count) {
		if (retry-- > 0) {
			DEV_INFO("%s[%s]: failed timout, retry=%d\n", __func__,
				what, retry);
			goto again;
		}
		status = -ETIMEDOUT;
		DEV_ERR("%s[%s]: timedout, DDC SW Status=%08x, HW "
			"Status=%08x, Int Ctrl=%08x\n", __func__, what,
			HDMI_INP_ND(0x0218), HDMI_INP_ND(0x021C),
			HDMI_INP_ND(0x0214));
		goto error;
	}
	/* Read DDC status */
	reg_val = HDMI_INP_ND(0x0218);
	/* NACK0..NACK3 status bits */
	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;
	/* Check if any NACK occurred */
	if (reg_val) {
		/* soft-reset the whole controller only on the last retry */
		if (retry > 1)
			HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
		else
			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
		if (retry-- > 0) {
			DEV_DBG("%s[%s]: failed NACK=%08x, retry=%d\n",
				__func__, what, reg_val, retry);
			msleep(100);
			goto again;
		}
		status = -EIO;
		DEV_ERR("%s[%s]: failed NACK: %08x\n", __func__, what, reg_val);
		goto error;
	}
	DEV_DBG("%s[%s] success\n", __func__, what);
error:
	return status;
}
/*
 * Read 'data_len' bytes into 'data_buf' from DDC (I2C) slave 'dev_addr'
 * starting at register 'offset', using a combined write-address/read
 * transfer.  'request_len' is the byte count programmed into the second
 * DDC transaction (it may exceed data_len, e.g. for EDID block reads).
 * Retries up to 'retry' times on timeout or NACK; 'what' tags the logs.
 * Returns 0 on success, -EINVAL for a NULL buffer, -ETIMEDOUT when the
 * SW_DONE completion never fires, or -EIO on persistent NACK.
 */
static int hdmi_msm_ddc_read_retry(uint32 dev_addr, uint32 offset,
	uint8 *data_buf, uint32 data_len, uint32 request_len, int retry,
	const char *what)
{
	uint32 reg_val, ndx;
	int status = 0;
	uint32 time_out_count;
	/* suppress the final error log when the caller allowed no retries */
	int log_retry_fail = retry != 1;
	if (NULL == data_buf) {
		status = -EINVAL;
		DEV_ERR("%s: invalid input paramter\n", __func__);
		goto error;
	}
again:
	status = hdmi_msm_ddc_clear_irq(what);
	if (status)
		goto error;
	/* Ensure Device Address has LSB set to 0 to indicate Slave addr read */
	dev_addr &= 0xFE;
	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
		1 while writing HDMI_DDC_DATA.
	   [23:16] INDEX Use to set index into DDC buffer for next read or
		current write, or to read index of current read or next write.
		Writable only when INDEX_WRITE=1.
	   [15:8] DATA Use to fill or read the DDC buffer
	   [0] DATA_RW Select whether buffer access will be a read or write.
		For writes, address auto-increments on write to HDMI_DDC_DATA.
		For reads, address autoincrements on reads to HDMI_DDC_DATA.
		* 0: Write
		* 1: Read */
	/* 1. Write to HDMI_I2C_DATA with the following fields set in order to
	 *    handle portion #1
	 *    DATA_RW = 0x0 (write)
	 *    DATA = linkAddress (primary link address and writing)
	 *    INDEX = 0x0 (initial offset into buffer)
	 *    INDEX_WRITE = 0x1 (setting initial offset) */
	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (dev_addr << 8));
	/* 2. Write to HDMI_I2C_DATA with the following fields set in order to
	 *    handle portion #2
	 *    DATA_RW = 0x0 (write)
	 *    DATA = offsetAddress
	 *    INDEX = 0x0
	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, offset << 8);
	/* 3. Write to HDMI_I2C_DATA with the following fields set in order to
	 *    handle portion #3
	 *    DATA_RW = 0x0 (write)
	 *    DATA = linkAddress + 1 (primary link address 0x74 and reading)
	 *    INDEX = 0x0
	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, (dev_addr | 1) << 8);
	/* Data setup is complete, now setup the transaction characteristics */
	/* 0x0228 HDMI_DDC_TRANS0
	   [23:16] CNT0 Byte count for first transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP0 Determines whether a stop bit will be sent after the first
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START0 Determines whether a start bit will be sent before the
		first transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK0 Determines whether the current transfer will stop
		if a NACK is received during the first transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW0 Read/write indicator for first transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
	      order to handle characteristics of portion #1 and portion #2
	 *    RW0 = 0x0 (write)
	 *    START0 = 0x1 (insert START bit)
	 *    STOP0 = 0x0 (do NOT insert STOP bit)
	 *    CNT0 = 0x1 (single byte transaction excluding address) */
	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));
	/* 0x022C HDMI_DDC_TRANS1
	   [23:16] CNT1 Byte count for second transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP1 Determines whether a stop bit will be sent after the second
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START1 Determines whether a start bit will be sent before the
		second transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK1 Determines whether the current transfer will stop if
		a NACK is received during the second transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW1 Read/write indicator for second transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
	      order to handle characteristics of portion #3
	 *    RW1 = 0x1 (read)
	 *    START1 = 0x1 (insert START bit)
	 *    STOP1 = 0x1 (insert STOP bit)
	 *    CNT1 = data_len (it's 128 (0x80) for a blk read) */
	HDMI_OUTP_ND(0x022C, 1 | (1 << 12) | (1 << 13) | (request_len << 16));
	/* Trigger the I2C transfer */
	/* 0x020C HDMI_DDC_CTRL
	   [21:20] TRANSACTION_CNT
		Number of transactions to be done in current transfer.
		* 0x0: transaction0 only
		* 0x1: transaction0, transaction1
		* 0x2: transaction0, transaction1, transaction2
		* 0x3: transaction0, transaction1, transaction2, transaction3
	   [3] SW_STATUS_RESET
		Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
		ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
		STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
	   [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
		data) at start of transfer.  This sequence is sent after GO is
		written to 1, before the first transaction only.
	   [1] SOFT_RESET Write 1 to reset DDC controller
	   [0] GO WRITE ONLY. Write 1 to start DDC transfer. */
	/* 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
	 *    Note that NOTHING has been transmitted on the DDC lines up to this
	 *    point.
	 *    TRANSACTION_CNT = 0x1 (execute transaction0 followed by
	 *    transaction1)
	 *    SEND_RESET = Set to 1 to send reset sequence
	 *    GO = 0x1 (kicks off hardware) */
	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
	HDMI_OUTP_ND(0x020C, (1 << 0) | (1 << 20));
	/* the ISR completes ddc_sw_done on the SW_DONE interrupt */
	time_out_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->ddc_sw_done, HZ/2);
	/* ack/disable the SW_DONE interrupt again */
	HDMI_OUTP_ND(0x0214, 0x2);
	if (!time_out_count) {
		if (retry-- > 0) {
			DEV_INFO("%s: failed timout, retry=%d\n", __func__,
				retry);
			goto again;
		}
		status = -ETIMEDOUT;
		DEV_ERR("%s: timedout(7), DDC SW Status=%08x, HW "
			"Status=%08x, Int Ctrl=%08x\n", __func__,
			HDMI_INP(0x0218), HDMI_INP(0x021C), HDMI_INP(0x0214));
		goto error;
	}
	/* Read DDC status */
	reg_val = HDMI_INP_ND(0x0218);
	/* NACK0..NACK3 status bits */
	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;
	/* Check if any NACK occurred */
	if (reg_val) {
		HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
		if (retry == 1)
			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
		if (retry-- > 0) {
			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d, "
				"dev-addr=0x%02x, offset=0x%02x, "
				"length=%d\n", __func__, what,
				reg_val, retry, dev_addr,
				offset, data_len);
			goto again;
		}
		status = -EIO;
		if (log_retry_fail)
			DEV_ERR("%s(%s): failed NACK=0x%08x, dev-addr=0x%02x, "
				"offset=0x%02x, length=%d\n", __func__, what,
				reg_val, dev_addr, offset, data_len);
		goto error;
	}
	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to 1
		while writing HDMI_DDC_DATA.
	   [23:16] INDEX Use to set index into DDC buffer for next read or
		current write, or to read index of current read or next write.
		Writable only when INDEX_WRITE=1.
	   [15:8] DATA Use to fill or read the DDC buffer
	   [0] DATA_RW Select whether buffer access will be a read or write.
		For writes, address auto-increments on write to HDMI_DDC_DATA.
		For reads, address autoincrements on reads to HDMI_DDC_DATA.
		* 0: Write
		* 1: Read */
	/* 8. ALL data is now available and waiting in the DDC buffer.
	 *    Read HDMI_I2C_DATA with the following fields set
	 *    RW = 0x1 (read)
	 *    DATA = BCAPS (this is field where data is pulled from)
	 *    INDEX = 0x3 (where the data has been placed in buffer by hardware)
	 *    INDEX_WRITE = 0x1 (explicitly define offset) */
	/* Write this data to DDC buffer */
	HDMI_OUTP_ND(0x0238, 0x1 | (3 << 16) | (1 << 31));
	/* Discard first byte */
	HDMI_INP_ND(0x0238);
	for (ndx = 0; ndx < data_len; ++ndx) {
		reg_val = HDMI_INP_ND(0x0238);
		data_buf[ndx] = (uint8) ((reg_val & 0x0000FF00) >> 8);
	}
	DEV_DBG("%s[%s] success\n", __func__, what);
error:
	return status;
}
/*
 * hdmi_msm_ddc_read_edid_seg() - read EDID data beyond block 1 using the
 * E-DDC (Enhanced DDC) segment-pointer protocol.
 *
 * Programs a 3-transaction hardware DDC transfer:
 *   transaction0: write segment number to the segment-pointer device (0x60)
 *   transaction1: write the byte offset to the EDID device
 *   transaction2: read request_len bytes back from the EDID device
 * then copies data_len bytes out of the hardware DDC buffer into data_buf.
 *
 * @dev_addr:    7-bit I2C device address in bits [7:1] (LSB is forced to 0)
 * @offset:      byte offset within the EDID segment
 * @data_buf:    destination buffer, must hold data_len bytes
 * @data_len:    number of bytes to copy out of the DDC buffer
 * @request_len: number of bytes the hardware reads in transaction2
 * @retry:       number of retries on timeout/NACK before giving up
 * @what:        human-readable tag for log messages
 *
 * Returns 0 on success, -EINVAL on bad input, -ETIMEDOUT or -EIO on
 * transfer failure.
 *
 * NOTE(review): seg_num is fixed at 0x01, so this routine can only reach
 * EDID blocks 2-3 (segment 1) — confirm callers never need higher segments.
 */
static int hdmi_msm_ddc_read_edid_seg(uint32 dev_addr, uint32 offset,
	uint8 *data_buf, uint32 data_len, uint32 request_len, int retry,
	const char *what)
{
	uint32 reg_val, ndx;
	int status = 0;
	uint32 time_out_count;
	/* only log the final failure if more than one attempt was allowed */
	int log_retry_fail = retry != 1;
	/* E-DDC segment pointer device address and the segment to select */
	int seg_addr = 0x60, seg_num = 0x01;
	if (NULL == data_buf) {
		status = -EINVAL;
		DEV_ERR("%s: invalid input paramter\n", __func__);
		goto error;
	}
again:
	status = hdmi_msm_ddc_clear_irq(what);
	if (status)
		goto error;
	/* Ensure Device Address has LSB set to 0 to indicate Slave addr read */
	dev_addr &= 0xFE;
	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
		1 while writing HDMI_DDC_DATA.
	   [23:16] INDEX Use to set index into DDC buffer for next read or
		current write, or to read index of current read or next write.
		Writable only when INDEX_WRITE=1.
	   [15:8] DATA Use to fill or read the DDC buffer
	   [0] DATA_RW Select whether buffer access will be a read or write.
		For writes, address auto-increments on write to HDMI_DDC_DATA.
		For reads, address autoincrements on reads to HDMI_DDC_DATA.
		* 0: Write
		* 1: Read */
	/* 1. Write the segment-pointer device address (0x60) into the DDC
	 * buffer at index 0:
	 *    DATA_RW = 0x0 (write)
	 *    DATA = seg_addr (segment pointer slave address)
	 *    INDEX = 0x0 (initial offset into buffer)
	 *    INDEX_WRITE = 0x1 (setting initial offset) */
	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (seg_addr << 8));
	/* 2. Write the segment number (payload of transaction0):
	 *    DATA_RW = 0x0 (write)
	 *    DATA = seg_num
	 *    INDEX = 0x0
	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, seg_num << 8);
	/* 3. Queue the remaining address bytes: EDID device address (write),
	 * the byte offset, then the EDID device address with R/W bit set
	 * (read) for transaction2.
	 *    DATA_RW = 0x0 (write)
	 *    INDEX = 0x0
	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
	HDMI_OUTP_ND(0x0238, dev_addr << 8);
	HDMI_OUTP_ND(0x0238, offset << 8);
	HDMI_OUTP_ND(0x0238, (dev_addr | 1) << 8);
	/* Data setup is complete, now setup the transaction characteristics */
	/* 0x0228 HDMI_DDC_TRANS0
	   [23:16] CNT0 Byte count for first transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP0 Determines whether a stop bit will be sent after the first
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START0 Determines whether a start bit will be sent before the
		first transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK0 Determines whether the current transfer will stop
		if a NACK is received during the first transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW0 Read/write indicator for first transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
	   order to handle characteristics of portion #1 and portion #2
	 *    RW0 = 0x0 (write)
	 *    START0 = 0x1 (insert START bit)
	 *    STOP0 = 0x0 (do NOT insert STOP bit)
	 *    CNT0 = 0x1 (single byte transaction excluding address) */
	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));
	/* 0x022C HDMI_DDC_TRANS1
	   [23:16] CNT1 Byte count for second transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP1 Determines whether a stop bit will be sent after the second
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START1 Determines whether a start bit will be sent before the
		second transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK1 Determines whether the current transfer will stop if
		a NACK is received during the second transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW1 Read/write indicator for second transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
	   order to handle characteristics of portion #3
	 *    RW1 = 0x0 (write)
	 *    START1 = 0x1 (insert START bit)
	 *    STOP1 = 0x0 (do NOT insert STOP bit)
	 *    CNT1 = 0x1 (single byte transaction excluding address) */
	HDMI_OUTP_ND(0x022C, (1 << 12) | (1 << 16));
	/* 0x0230 HDMI_DDC_TRANS2
	   [23:16] CNT2 Byte count for third transaction (excluding the first
		byte, which is usually the address).
	   [13] STOP2 Determines whether a stop bit will be sent after the third
		transaction
		* 0: NO STOP
		* 1: STOP
	   [12] START2 Determines whether a start bit will be sent before the
		third transaction
		* 0: NO START
		* 1: START
	   [8] STOP_ON_NACK2 Determines whether the current transfer will stop if
		a NACK is received during the third transaction (current
		transaction always stops).
		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
	   [0] RW2 Read/write indicator for third transaction - set to 0 for
		write, 1 for read. This bit only controls HDMI_DDC behaviour -
		the R/W bit in the transaction is programmed into the DDC buffer
		as the LSB of the address byte.
		* 0: WRITE
		* 1: READ */
	/* 6. Write to HDMI_I2C_TRANSACTION2 with the following fields set in
	   order to handle the read-back of the EDID data
	 *    RW2 = 0x1 (read)
	 *    START2 = 0x1 (insert START bit)
	 *    STOP2 = 0x1 (insert STOP bit)
	 *    CNT2 = request_len (it's 128 (0x80) for a blk read) */
	HDMI_OUTP_ND(0x0230, 1 | (1 << 12) | (1 << 13) | (request_len << 16));
	/* Trigger the I2C transfer */
	/* 0x020C HDMI_DDC_CTRL
	   [21:20] TRANSACTION_CNT
		Number of transactions to be done in current transfer.
		* 0x0: transaction0 only
		* 0x1: transaction0, transaction1
		* 0x2: transaction0, transaction1, transaction2
		* 0x3: transaction0, transaction1, transaction2, transaction3
	   [3] SW_STATUS_RESET
		Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
		ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
		STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
	   [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
		data) at start of transfer.  This sequence is sent after GO is
		written to 1, before the first transaction only.
	   [1] SOFT_RESET Write 1 to reset DDC controller
	   [0] GO WRITE ONLY. Write 1 to start DDC transfer. */
	/* 7. Write to HDMI_I2C_CONTROL to kick off the hardware.
	 *    Note that NOTHING has been transmitted on the DDC lines up to this
	 *    point.
	 *    TRANSACTION_CNT = 0x2 (execute transaction0, transaction1,
	 *    then transaction2)
	 *    GO = 0x1 (kicks off hardware) */
	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
	HDMI_OUTP_ND(0x020C, (1 << 0) | (2 << 20));
	/* Wait up to half a second for the ISR to signal SW_DONE */
	time_out_count = wait_for_completion_interruptible_timeout(
		&hdmi_msm_state->ddc_sw_done, HZ/2);
	/* Mask the DDC interrupt again while we inspect status */
	HDMI_OUTP_ND(0x0214, 0x2);
	if (!time_out_count) {
		if (retry-- > 0) {
			DEV_INFO("%s: failed timout, retry=%d\n", __func__,
				retry);
			goto again;
		}
		status = -ETIMEDOUT;
		DEV_ERR("%s: timedout(7), DDC SW Status=%08x, HW "
			"Status=%08x, Int Ctrl=%08x\n", __func__,
			HDMI_INP(0x0218), HDMI_INP(0x021C), HDMI_INP(0x0214));
		goto error;
	}
	/* Read DDC status */
	reg_val = HDMI_INP_ND(0x0218);
	/* keep only the NACK0..NACK3 bits [15:12] */
	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;
	/* Check if any NACK occurred */
	if (reg_val) {
		HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
		if (retry == 1)
			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
		if (retry-- > 0) {
			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d, "
				"dev-addr=0x%02x, offset=0x%02x, "
				"length=%d\n", __func__, what,
				reg_val, retry, dev_addr,
				offset, data_len);
			goto again;
		}
		status = -EIO;
		if (log_retry_fail)
			DEV_ERR("%s(%s): failed NACK=0x%08x, dev-addr=0x%02x, "
				"offset=0x%02x, length=%d\n", __func__, what,
				reg_val, dev_addr, offset, data_len);
		goto error;
	}
	/* 0x0238 HDMI_DDC_DATA
	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to 1
		while writing HDMI_DDC_DATA.
	   [23:16] INDEX Use to set index into DDC buffer for next read or
		current write, or to read index of current read or next write.
		Writable only when INDEX_WRITE=1.
	   [15:8] DATA Use to fill or read the DDC buffer
	   [0] DATA_RW Select whether buffer access will be a read or write.
		For writes, address auto-increments on write to HDMI_DDC_DATA.
		For reads, address autoincrements on reads to HDMI_DDC_DATA.
		* 0: Write
		* 1: Read */
	/* 8. ALL data is now available and waiting in the DDC buffer.
	 *    Read HDMI_I2C_DATA with the following fields set
	 *    RW = 0x1 (read)
	 *    DATA = BCAPS (this is field where data is pulled from)
	 *    INDEX = 0x5 (the 5 address bytes written above precede the data)
	 *    INDEX_WRITE = 0x1 (explicitly define offset) */
	/* Write this data to DDC buffer */
	HDMI_OUTP_ND(0x0238, 0x1 | (5 << 16) | (1 << 31));
	/* Discard first byte */
	HDMI_INP_ND(0x0238);
	/* Each read auto-increments; data lives in bits [15:8] */
	for (ndx = 0; ndx < data_len; ++ndx) {
		reg_val = HDMI_INP_ND(0x0238);
		data_buf[ndx] = (uint8) ((reg_val & 0x0000FF00) >> 8);
	}
	DEV_DBG("%s[%s] success\n", __func__, what);
error:
	return status;
}
/*
 * hdmi_msm_ddc_read() - DDC read with an automatic second attempt.
 *
 * First requests exactly data_len bytes; if that fails, retries once more,
 * either with the same length (no_align) or with the request rounded up to
 * the next 32-byte multiple to work around alignment-sensitive sinks.
 */
static int hdmi_msm_ddc_read(uint32 dev_addr, uint32 offset, uint8 *data_buf,
	uint32 data_len, int retry, const char *what, boolean no_align)
{
	uint32 request_len;
	int rc;

	/* First attempt: request exactly data_len bytes */
	rc = hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf, data_len,
		data_len, retry, what);
	if (!rc)
		return 0;

	/* Second attempt: optionally round the request up to 32 bytes */
	request_len = no_align ? data_len : 32 * ((data_len + 31) / 32);
	return hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf, data_len,
		request_len, retry, what);
}
/*
 * hdmi_msm_read_edid_block() - read one 128-byte EDID block.
 *
 * Tries a full 128-byte read first; on failure, halves the chunk size
 * (128 -> 64 -> 32 -> 16) and re-reads the whole block in pieces.
 * Blocks 0-1 use plain DDC; blocks 2+ need the E-DDC segment pointer.
 */
static int hdmi_msm_read_edid_block(int block, uint8 *edid_buf)
{
	int rc = 0;
	int chunk;
	int pos;

	for (chunk = 0x80; chunk >= 16; chunk /= 2) {
		DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
			block, chunk);
		for (pos = 0; pos < 0x80; pos += chunk) {
			/*Read EDID twice with 32bit alighnment too */
			if (block < 2)
				rc = hdmi_msm_ddc_read(0xA0, block*0x80 + pos,
					edid_buf+pos, chunk, 1,
					"EDID", FALSE);
			else
				rc = hdmi_msm_ddc_read_edid_seg(0xA0,
					block*0x80 + pos, edid_buf+pos, chunk,
					chunk, 1, "EDID");
			if (rc)
				break;
		}
		if (!rc)
			break;
	}
	return rc;
}
/*
 * hdmi_msm_read_edid() - initialise DDC and read the sink's EDID.
 *
 * Returns 0 on success, -ENXIO if the HDMI engine is powered off, or the
 * error from hdmi_common_read_edid().
 */
static int hdmi_msm_read_edid(void)
{
	int status = -ENXIO;

	msm_hdmi_init_ddc();
	/* The HDMI engine must be powered before any DDC transaction */
	if (!hdmi_msm_is_power_on()) {
		DEV_ERR("%s: failed: HDMI power is off", __func__);
		goto error;
	}
	external_common_state->read_edid_block = hdmi_msm_read_edid_block;
	status = hdmi_common_read_edid();
	if (!status)
		DEV_DBG("EDID: successfully read\n");
error:
	return status;
}
/*
 * hdcp_auth_info() - log a human-readable description of the HDCP
 * authentication-failure info code (values 8+ are reserved).
 */
static void hdcp_auth_info(uint32 auth_info)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}
	if (auth_info == 0)
		DEV_INFO("%s: None", __func__);
	else if (auth_info == 1)
		DEV_INFO("%s: Software Disabled Authentication", __func__);
	else if (auth_info == 2)
		DEV_INFO("%s: An Written", __func__);
	else if (auth_info == 3)
		DEV_INFO("%s: Invalid Aksv", __func__);
	else if (auth_info == 4)
		DEV_INFO("%s: Invalid Bksv", __func__);
	else if (auth_info == 5)
		DEV_INFO("%s: RI Mismatch (including RO)", __func__);
	else if (auth_info == 6)
		DEV_INFO("%s: consecutive Pj Mismatches", __func__);
	else if (auth_info == 7)
		DEV_INFO("%s: HPD Disconnect", __func__);
	else
		DEV_INFO("%s: Reserved", __func__);
}
/*
 * hdcp_key_state() - log a human-readable description of the HDCP key
 * block state (values 7+ are reserved).
 */
static void hdcp_key_state(uint32 key_state)
{
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}
	switch (key_state) {
	case 0:
		DEV_WARN("%s: No HDCP Keys", __func__);
		break;
	case 1:
		DEV_WARN("%s: Not Checked", __func__);
		break;
	case 2:
		DEV_DBG("%s: Checking", __func__);
		break;
	case 3:
		DEV_DBG("%s: HDCP Keys Valid", __func__);
		break;
	case 4:
		DEV_WARN("%s: AKSV not valid", __func__);
		break;
	case 5:
		DEV_WARN("%s: Checksum Mismatch", __func__);
		break;
	case 6:
		/* Fixed: the adjacent string literals previously concatenated
		 * to "Production AKSVwith ..." (missing separator space). */
		DEV_DBG("%s: Production AKSV "
			"with ENABLE_USER_DEFINED_AN=1", __func__);
		break;
	case 7:
	default:
		DEV_INFO("%s: Reserved", __func__);
		break;
	}
}
/*
 * hdmi_msm_count_one() - count the set bits across the first len bytes of
 * array (used to validate AKSV/BKSV, which must contain exactly 20 ones).
 */
static int hdmi_msm_count_one(uint8 *array, uint8 len)
{
	int idx, total = 0;

	for (idx = 0; idx < len; idx++) {
		uint8 byte = array[idx];

		/* clear the lowest set bit per iteration (Kernighan) */
		while (byte) {
			byte &= (uint8)(byte - 1);
			total++;
		}
	}
	return total;
}
/*
 * hdcp_deauthenticate() - tear down the active HDCP session.
 *
 * Masks all HDCP interrupts, clears the hdcp_active flag, pulses the
 * link-0 deauthenticate bit, disables the HDCP block, and, if the link
 * status snapshot shows an auth failure, logs the failure-info code.
 */
static void hdcp_deauthenticate(void)
{
	/* Snapshot link status before the block is reset below */
	int hdcp_link_status = HDMI_INP(0x011C);
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}
	/* Disable HDCP interrupts */
	HDMI_OUTP(0x0118, 0x0);
	mutex_lock(&hdcp_auth_state_mutex);
	external_common_state->hdcp_active = FALSE;
	mutex_unlock(&hdcp_auth_state_mutex);
	/* 0x0130 HDCP_RESET
	  [0] LINK0_DEAUTHENTICATE */
	HDMI_OUTP(0x0130, 0x1);
	/* 0x0110 HDCP_CTRL
	  [8] ENCRYPTION_ENABLE
	  [0] ENABLE */
	/* disable both encryption and the HDCP block */
	HDMI_OUTP(0x0110, 0x0);
	/* bit 2 of the snapshot flags an auth failure; bits [7:4] carry the
	 * failure-info code decoded by hdcp_auth_info() */
	if (hdcp_link_status & 0x00000004)
		hdcp_auth_info((hdcp_link_status & 0x000000F0) >> 4);
}
/*
 * check_and_clear_HDCP_DDC_Failure() - recover the hardware HDCP DDC
 * engine after a failed or NACKed transfer.
 *
 * Reads HDCP_DDC_STATUS; if FAILED is set, temporarily switches to
 * software DDC, acks the failure, verifies it cleared, and re-enables
 * hardware DDC.  If NACK0 is set, pulses SW_STATUS_RESET on the HDMI DDC
 * controller.  Logs status on entry and exit.
 */
static void check_and_clear_HDCP_DDC_Failure(void)
{
	int hdcp_ddc_ctrl1_reg;
	int hdcp_ddc_status;
	int failure;
	int nack0;
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return;
	}
	/*
	 * Check for any DDC transfer failures
	 * 0x0128 HDCP_DDC_STATUS
	 * [16] FAILED		Indicates that the last HDCP HW DDC transer
	 *			failed. This occurs when a transfer is
	 *			attempted with HDCP DDC disabled
	 *			(HDCP_DDC_DISABLE=1) or the number of retries
	 *			match HDCP_DDC_RETRY_CNT
	 *
	 * [14] NACK0		Indicates that the last HDCP HW DDC transfer
	 *			was aborted due to a NACK on the first
	 *			transaction - cleared by writing 0 to GO bit
	 */
	hdcp_ddc_status = HDMI_INP(HDCP_DDC_STATUS);
	failure = (hdcp_ddc_status >> 16) & 0x1;
	nack0 = (hdcp_ddc_status >> 14) & 0x1;
	DEV_DBG("%s: On Entry: HDCP_DDC_STATUS = 0x%x, FAILURE = %d,"
		"NACK0 = %d\n", __func__ , hdcp_ddc_status, failure, nack0);
	if (failure == 0x1) {
		/*
		 * Indicates that the last HDCP HW DDC transfer failed.
		 * This occurs when a transfer is attempted with HDCP DDC
		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
		 * matches HDCP_DDC_RETRY_CNT.
		 * Failure occured,  let's clear it.
		 */
		DEV_INFO("%s: DDC failure detected. HDCP_DDC_STATUS=0x%08x\n",
			 __func__, hdcp_ddc_status);
		/*
		 * First, Disable DDC
		 * 0x0120 HDCP_DDC_CTRL_0
		 * [0] DDC_DISABLE	Determines whether HDCP Ri and Pj reads
		 *			are done unassisted by hardware or by
		 *			software via HDMI_DDC (HDCP provides
		 *			interrupts to request software
		 *			transfers)
		 *			0 : Use Hardware DDC
		 *			1 : Use Software DDC
		 */
		HDMI_OUTP(HDCP_DDC_CTRL_0, 0x1);
		/*
		 * ACK the Failure to Clear it
		 * 0x0124 HDCP_DDC_CTRL_1
		 * [0] DDC_FAILED_ACK	Write 1 to clear
		 *			HDCP_STATUS.HDCP_DDC_FAILED
		 */
		hdcp_ddc_ctrl1_reg = HDMI_INP(HDCP_DDC_CTRL_1);
		HDMI_OUTP(HDCP_DDC_CTRL_1, hdcp_ddc_ctrl1_reg | 0x1);
		/* Check if the FAILURE got Cleared */
		hdcp_ddc_status = HDMI_INP(HDCP_DDC_STATUS);
		hdcp_ddc_status = (hdcp_ddc_status >> 16) & 0x1;
		if (hdcp_ddc_status == 0x0) {
			DEV_INFO("%s: HDCP DDC Failure has been cleared\n",
				  __func__);
		} else {
			DEV_WARN("%s: Error: HDCP DDC Failure DID NOT get"
				 "cleared\n", __func__);
		}
		/* Re-Enable HDCP DDC */
		HDMI_OUTP(HDCP_DDC_CTRL_0, 0x0);
	}
	if (nack0 == 0x1) {
		/*
		 * 0x020C HDMI_DDC_CTRL
		 * [3] SW_STATUS_RESET	Write 1 to reset HDMI_DDC_SW_STATUS
		 *			flags, will reset SW_DONE, ABORTED,
		 *			TIMEOUT, SW_INTERRUPTED,
		 *			BUFFER_OVERFLOW, STOPPED_ON_NACK, NACK0,
		 *			NACK1, NACK2, NACK3
		 */
		/* pulse SW_STATUS_RESET: set, settle for 20ms, then clear */
		HDMI_OUTP_ND(HDMI_DDC_CTRL,
			     HDMI_INP(HDMI_DDC_CTRL) | (0x1 << 3));
		msleep(20);
		HDMI_OUTP_ND(HDMI_DDC_CTRL,
			     HDMI_INP(HDMI_DDC_CTRL) & ~(0x1 << 3));
	}
	/* Re-read status so the exit log reflects the recovery result */
	hdcp_ddc_status = HDMI_INP(HDCP_DDC_STATUS);
	failure = (hdcp_ddc_status >> 16) & 0x1;
	nack0 = (hdcp_ddc_status >> 14) & 0x1;
	DEV_DBG("%s: On Exit: HDCP_DDC_STATUS = 0x%x, FAILURE = %d,"
		"NACK0 = %d\n", __func__ , hdcp_ddc_status, failure, nack0);
}
/*
 * hdcp_authentication_part1() - HDCP first-part authentication.
 *
 * Exchanges An/AKSV with the sink, reads back BKSV, loads BKSV into the
 * transmitter, and verifies R0 == R0' (RI_MATCH).  Encryption is enabled
 * only after a successful match.
 *
 * Returns 0 on success, a negative errno on failure (caller re-tries).
 *
 * NOTE(review): is_part1_done is a non-static local initialised to FALSE,
 * so the `if (!is_part1_done)` branch is always taken and the trailing
 * `return 1` is dead code — confirm whether this was meant to be static.
 */
static int hdcp_authentication_part1(void)
{
	int ret = 0;
	boolean is_match;
	boolean is_part1_done = FALSE;
	uint32 timeout_count;
	uint8 bcaps;
	uint8 aksv[5];
	uint32 qfprom_aksv_0, qfprom_aksv_1, link0_aksv_0, link0_aksv_1;
	uint8 bksv[5];
	uint32 link0_bksv_0, link0_bksv_1;
	uint8 an[8];
	uint32 link0_an_0, link0_an_1;
	uint32 hpd_int_status, hpd_int_ctrl;
	static uint8 buf[0xFF];
	memset(buf, 0, sizeof(buf));
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}
	if (!is_part1_done) {
		is_part1_done = TRUE;
		/* Fetch aksv from QFprom, this info should be public. */
		qfprom_aksv_0 = inpdw(QFPROM_BASE + 0x000060D8);
		qfprom_aksv_1 = inpdw(QFPROM_BASE + 0x000060DC);
		/* copy an and aksv to byte arrays for transmission */
		aksv[0] =  qfprom_aksv_0        & 0xFF;
		aksv[1] = (qfprom_aksv_0 >> 8)  & 0xFF;
		aksv[2] = (qfprom_aksv_0 >> 16) & 0xFF;
		aksv[3] = (qfprom_aksv_0 >> 24) & 0xFF;
		aksv[4] =  qfprom_aksv_1        & 0xFF;
		/* check there are 20 ones in AKSV */
		if (hdmi_msm_count_one(aksv, 5) != 20) {
			DEV_ERR("HDCP: AKSV read from QFPROM doesn't have "
				"20 1's and 20 0's, FAIL (AKSV=%02x%08x)\n",
				qfprom_aksv_1, qfprom_aksv_0);
			ret = -EINVAL;
			goto error;
		}
		DEV_DBG("HDCP: AKSV=%02x%08x\n", qfprom_aksv_1, qfprom_aksv_0);
		/* 0x0288 HDCP_SW_LOWER_AKSV
			[31:0] LOWER_AKSV */
		/* 0x0284 HDCP_SW_UPPER_AKSV
			[7:0] UPPER_AKSV */
		/* This is the lower 32 bits of the SW
		 * injected AKSV value(AKSV[31:0]) read
		 * from the EFUSE. It is needed for HDCP
		 * authentication and must be written
		 * before enabling HDCP. */
		HDMI_OUTP(0x0288, qfprom_aksv_0);
		HDMI_OUTP(0x0284, qfprom_aksv_1);
		msm_hdmi_init_ddc();
		/* read Bcaps at 0x40 in HDCP Port */
		ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 5, "Bcaps",
			TRUE);
		if (ret) {
			DEV_ERR("%s(%d): Read Bcaps failed", __func__,
			    __LINE__);
			goto error;
		}
		DEV_DBG("HDCP: Bcaps=%02x\n", bcaps);
		/* HDCP setup prior to HDCP enabled */
		/* 0x0148 HDCP_RCVPORT_DATA4
			[15:8] LINK0_AINFO
			[7:0] LINK0_AKSV_1 */
		/* LINK0_AINFO	= 0x2 FEATURE 1.1 on.
		 *		= 0x0 FEATURE 1.1 off*/
		HDMI_OUTP(0x0148, 0x0);
		/* 0x012C HDCP_ENTROPY_CTRL0
			[31:0] BITS_OF_INFLUENCE_0 */
		/* 0x025C HDCP_ENTROPY_CTRL1
			[31:0] BITS_OF_INFLUENCE_1 */
		/* seed values for the An random-number generator */
		HDMI_OUTP(0x012C, 0xB1FFB0FF);
		HDMI_OUTP(0x025C, 0xF00DFACE);
		/* 0x0114 HDCP_DEBUG_CTRL
			[2]	DEBUG_RNG_CIPHER
			else default 0 */
		HDMI_OUTP(0x0114, HDMI_INP(0x0114) & 0xFFFFFFFB);
		/* 0x0110 HDCP_CTRL
			[8] ENCRYPTION_ENABLE
			[0] ENABLE */
		/* Enable HDCP. Encryption should be enabled after reading R0 */
		HDMI_OUTP(0x0110, BIT(0));
		/*
		 * Check to see if a HDCP DDC Failure is indicated in
		 * HDCP_DDC_STATUS. If yes, clear it.
		 */
		check_and_clear_HDCP_DDC_Failure();
		/* 0x0118 HDCP_INT_CTRL
		 *    [2] AUTH_SUCCESS_MASK	[R/W]	Mask bit for\
		 *					HDCP Authentication
		 *		Success interrupt - set to 1 to enable interrupt
		 *
		 *    [6] AUTH_FAIL_MASK	[R/W]	Mask bit for HDCP
		 *					Authentication
		 *		Lost interrupt set to 1 to enable interrupt
		 *
		 *    [7] AUTH_FAIL_INFO_ACK	[W]	Acknwledge bit for HDCP
		 *		Auth Failure Info field - write 1 to clear
		 *
		 *   [10] DDC_XFER_REQ_MASK	[R/W]	Mask bit for HDCP\
		 *					DDC Transfer
		 *		Request interrupt - set to 1 to enable interrupt
		 *
		 *   [14] DDC_XFER_DONE_MASK	[R/W]	Mask bit for HDCP\
		 *					DDC Transfer
		 *		done interrupt - set to 1 to enable interrupt */
		/* enable all HDCP ints */
		HDMI_OUTP(0x0118, (1 << 2) | (1 << 6) | (1 << 7));
		/* 0x011C HDCP_LINK0_STATUS
			[8] AN_0_READY
			[9] AN_1_READY */
		/* wait for an0 and an1 ready bits to be set in LINK0_STATUS */
		mutex_lock(&hdcp_auth_state_mutex);
		/* poll up to ~2s (100 x 20ms) for both AN ready bits */
		timeout_count = 100;
		while (((HDMI_INP_ND(0x011C) & (0x3 << 8)) != (0x3 << 8))
			&& timeout_count--)
			msleep(20);
		if (!timeout_count) {
			ret = -ETIMEDOUT;
			DEV_ERR("%s(%d): timedout, An0=%d, An1=%d\n",
				__func__, __LINE__,
			(HDMI_INP_ND(0x011C) & BIT(8)) >> 8,
			(HDMI_INP_ND(0x011C) & BIT(9)) >> 9);
			mutex_unlock(&hdcp_auth_state_mutex);
			goto error;
		}
		/* 0x0168 HDCP_RCVPORT_DATA12
		   [23:8] BSTATUS
		   [7:0] BCAPS */
		HDMI_OUTP(0x0168, bcaps);
		/* 0x014C HDCP_RCVPORT_DATA5
			[31:0] LINK0_AN_0 */
		/* read an0 calculation */
		link0_an_0 = HDMI_INP(0x014C);
		/* 0x0150 HDCP_RCVPORT_DATA6
			[31:0] LINK0_AN_1 */
		/* read an1 calculation */
		link0_an_1 = HDMI_INP(0x0150);
		mutex_unlock(&hdcp_auth_state_mutex);
		/* three bits 28..30 */
		hdcp_key_state((HDMI_INP(0x011C) >> 28) & 0x7);
		/* 0x0144 HDCP_RCVPORT_DATA3
			[31:0] LINK0_AKSV_0 public key
		   0x0148 HDCP_RCVPORT_DATA4
			[15:8] LINK0_AINFO
			[7:0]  LINK0_AKSV_1 public key */
		link0_aksv_0 = HDMI_INP(0x0144);
		link0_aksv_1 = HDMI_INP(0x0148);
		/* copy an and aksv to byte arrays for transmission */
		aksv[0] =  link0_aksv_0        & 0xFF;
		aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
		aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
		aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
		aksv[4] =  link0_aksv_1        & 0xFF;
		an[0] =  link0_an_0        & 0xFF;
		an[1] = (link0_an_0 >> 8)  & 0xFF;
		an[2] = (link0_an_0 >> 16) & 0xFF;
		an[3] = (link0_an_0 >> 24) & 0xFF;
		an[4] =  link0_an_1        & 0xFF;
		an[5] = (link0_an_1 >> 8)  & 0xFF;
		an[6] = (link0_an_1 >> 16) & 0xFF;
		an[7] = (link0_an_1 >> 24) & 0xFF;
		/* Write An 8 bytes to offset 0x18 */
		ret = hdmi_msm_ddc_write(0x74, 0x18, an, 8, "An");
		if (ret) {
			DEV_ERR("%s(%d): Write An failed", __func__, __LINE__);
			goto error;
		}
		/* Write Aksv 5 bytes to offset 0x10 */
		ret = hdmi_msm_ddc_write(0x74, 0x10, aksv, 5, "Aksv");
		if (ret) {
			DEV_ERR("%s(%d): Write Aksv failed", __func__,
			    __LINE__);
			goto error;
		}
		DEV_DBG("HDCP: Link0-AKSV=%02x%08x\n",
			link0_aksv_1 & 0xFF, link0_aksv_0);
		/* Read Bksv 5 bytes at 0x00 in HDCP port */
		ret = hdmi_msm_ddc_read(0x74, 0x00, bksv, 5, 5, "Bksv", TRUE);
		if (ret) {
			DEV_ERR("%s(%d): Read BKSV failed", __func__, __LINE__);
			goto error;
		}
		/* check there are 20 ones in BKSV */
		if (hdmi_msm_count_one(bksv, 5) != 20) {
			DEV_ERR("HDCP: BKSV read from Sink doesn't have "
				"20 1's and 20 0's, FAIL (BKSV="
				"%02x%02x%02x%02x%02x)\n",
				bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
			ret = -EINVAL;
			goto error;
		}
		/* pack the 5 BKSV bytes into a 32-bit word + a top byte */
		link0_bksv_0 = bksv[3];
		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
		link0_bksv_1 = bksv[4];
		DEV_DBG("HDCP: BKSV=%02x%08x\n", link0_bksv_1, link0_bksv_0);
		/* 0x0134 HDCP_RCVPORT_DATA0
		   [31:0] LINK0_BKSV_0 */
		HDMI_OUTP(0x0134, link0_bksv_0);
		/* 0x0138 HDCP_RCVPORT_DATA1
		   [31:0] LINK0_BKSV_1 */
		HDMI_OUTP(0x0138, link0_bksv_1);
		DEV_DBG("HDCP: Link0-BKSV=%02x%08x\n", link0_bksv_1,
		    link0_bksv_0);
		/* HDMI_HPD_INT_STATUS[0x0250] */
		hpd_int_status = HDMI_INP_ND(0x0250);
		/* HDMI_HPD_INT_CTRL[0x0254] */
		hpd_int_ctrl = HDMI_INP_ND(0x0254);
		DEV_DBG("[SR-DEUG]: HPD_INTR_CTRL=[%u] HPD_INTR_STATUS=[%u] "
			"before reading R0'\n", hpd_int_ctrl, hpd_int_status);
		/*
		 * HDCP Compliace Test case 1B-01:
		 * Wait here until all the ksv bytes have been
		 * read from the KSV FIFO register.
		 */
		msleep(125);
		/* Reading R0' 2 bytes at offset 0x08 */
		ret = hdmi_msm_ddc_read(0x74, 0x08, buf, 2, 5, "RO'", TRUE);
		if (ret) {
			DEV_ERR("%s(%d): Read RO's failed", __func__,
			    __LINE__);
			goto error;
		}
		DEV_DBG("HDCP: R0'=%02x%02x\n", buf[1], buf[0]);
		INIT_COMPLETION(hdmi_msm_state->hdcp_success_done);
		/* 0x013C HDCP_RCVPORT_DATA2_0
		[15:0] LINK0_RI */
		/* writing R0' triggers the hardware RI comparison */
		HDMI_OUTP(0x013C, (((uint32)buf[1]) << 8) | buf[0]);
		timeout_count = wait_for_completion_interruptible_timeout(
			&hdmi_msm_state->hdcp_success_done, HZ*2);
		if (!timeout_count) {
			ret = -ETIMEDOUT;
			/* even without the interrupt, accept a late RI match */
			is_match = HDMI_INP(0x011C) & BIT(12);
			DEV_ERR("%s(%d): timedout, Link0=<%s>\n", __func__,
			  __LINE__,
			  is_match ? "RI_MATCH" : "No RI Match INTR in time");
			if (!is_match)
				goto error;
		}
		/* 0x011C HDCP_LINK0_STATUS
		[12] RI_MATCHES	[0] MISMATCH, [1] MATCH
		[0] AUTH_SUCCESS */
		/* Checking for RI, R0 Match */
		/* RI_MATCHES */
		if ((HDMI_INP(0x011C) & BIT(12)) != BIT(12)) {
			ret = -EINVAL;
			DEV_ERR("%s: HDCP_LINK0_STATUS[RI_MATCHES]: MISMATCH\n",
			    __func__);
			goto error;
		}
		/* Enable HDCP Encryption */
		HDMI_OUTP(0x0110, BIT(0) | BIT(8));
		DEV_INFO("HDCP: authentication part I, successful\n");
		is_part1_done = FALSE;
		return 0;
error:
		DEV_ERR("[%s]: HDCP Reauthentication\n", __func__);
		is_part1_done = FALSE;
		return ret;
	} else {
		return 1;
	}
}
static int hdmi_msm_transfer_v_h(void)
{
/* Read V'.HO 4 Byte at offset 0x20 */
char what[20];
int ret;
uint8 buf[4];
if (!hdmi_msm_state->hdcp_enable) {
DEV_DBG("%s: HDCP not enabled\n", __func__);
return 0;
}
snprintf(what, sizeof(what), "V' H0");
ret = hdmi_msm_ddc_read(0x74, 0x20, buf, 4, 5, what, TRUE);
if (ret) {
DEV_ERR("%s: Read %s failed", __func__, what);
return ret;
}
DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
buf[0] , buf[1] , buf[2] , buf[3]);
/* 0x0154 HDCP_RCVPORT_DATA7
[31:0] V_HO */
HDMI_OUTP(0x0154 ,
(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
snprintf(what, sizeof(what), "V' H1");
ret = hdmi_msm_ddc_read(0x74, 0x24, buf, 4, 5, what, TRUE);
if (ret) {
DEV_ERR("%s: Read %s failed", __func__, what);
return ret;
}
DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
buf[0] , buf[1] , buf[2] , buf[3]);
/* 0x0158 HDCP_RCVPORT_ DATA8
[31:0] V_H1 */
HDMI_OUTP(0x0158,
(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
snprintf(what, sizeof(what), "V' H2");
ret = hdmi_msm_ddc_read(0x74, 0x28, buf, 4, 5, what, TRUE);
if (ret) {
DEV_ERR("%s: Read %s failed", __func__, what);
return ret;
}
DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
buf[0] , buf[1] , buf[2] , buf[3]);
/* 0x015c HDCP_RCVPORT_DATA9
[31:0] V_H2 */
HDMI_OUTP(0x015c ,
(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
snprintf(what, sizeof(what), "V' H3");
ret = hdmi_msm_ddc_read(0x74, 0x2c, buf, 4, 5, what, TRUE);
if (ret) {
DEV_ERR("%s: Read %s failed", __func__, what);
return ret;
}
DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
buf[0] , buf[1] , buf[2] , buf[3]);
/* 0x0160 HDCP_RCVPORT_DATA10
[31:0] V_H3 */
HDMI_OUTP(0x0160,
(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
snprintf(what, sizeof(what), "V' H4");
ret = hdmi_msm_ddc_read(0x74, 0x30, buf, 4, 5, what, TRUE);
if (ret) {
DEV_ERR("%s: Read %s failed", __func__, what);
return ret;
}
DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
buf[0] , buf[1] , buf[2] , buf[3]);
/* 0x0164 HDCP_RCVPORT_DATA11
[31:0] V_H4 */
HDMI_OUTP(0x0164,
(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
return 0;
}
/*
 * hdcp_authentication_part2() - HDCP second-part (repeater)
 * authentication.
 *
 * Waits for the repeater's READY bit, reads Bstatus, validates the
 * downstream topology (device count, MAX_DEVS/MAX_CASCADE), pulls the
 * KSV FIFO, transfers V' via hdmi_msm_transfer_v_h(), feeds the KSV list
 * into the hardware SHA engine one byte at a time, and finally waits for
 * SHA completion and the V_MATCHES bit.
 *
 * Returns 0 on success, a negative errno on failure.
 *
 * NOTE(review): `kvs_fifo` appears to be a misspelling of "ksv_fifo";
 * left as-is since this is a behavior-neutral local name.
 */
static int hdcp_authentication_part2(void)
{
	int ret = 0;
	uint32 timeout_count;
	int i = 0;
	int cnt = 0;
	uint bstatus;
	uint8 bcaps;
	uint32 down_stream_devices;
	uint32 ksv_bytes;
	static uint8 buf[0xFF];
	/* 5 KSV bytes per device, at most 127 downstream devices */
	static uint8 kvs_fifo[5 * 127];
	boolean max_devs_exceeded = 0;
	boolean max_cascade_exceeded = 0;
	boolean ksv_done = FALSE;
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}
	memset(buf, 0, sizeof(buf));
	memset(kvs_fifo, 0, sizeof(kvs_fifo));
	/* wait until READY bit is set in bcaps */
	timeout_count = 50;
	do {
		timeout_count--;
		/* read bcaps 1 Byte at offset 0x40 */
		ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 1,
		    "Bcaps", FALSE);
		if (ret) {
			DEV_ERR("%s(%d): Read Bcaps failed", __func__,
			    __LINE__);
			goto error;
		}
		msleep(100);
	} while ((0 == (bcaps & 0x20)) && timeout_count); /* READY (Bit 5) */
	if (!timeout_count) {
		ret = -ETIMEDOUT;
		DEV_ERR("%s:timedout(1)", __func__);
		goto error;
	}
	/* read bstatus 2 bytes at offset 0x41 */
	ret = hdmi_msm_ddc_read(0x74, 0x41, buf, 2, 5, "Bstatus", FALSE);
	if (ret) {
		DEV_ERR("%s(%d): Read Bstatus failed", __func__, __LINE__);
		goto error;
	}
	/* assemble the 16-bit Bstatus, little-endian from the DDC bytes */
	bstatus = buf[1];
	bstatus = (bstatus << 8) | buf[0];
	/* 0x0168 DCP_RCVPORT_DATA12
	   [7:0] BCAPS
	   [23:8 BSTATUS */
	HDMI_OUTP(0x0168, bcaps | (bstatus << 8));
	/* BSTATUS [6:0] DEVICE_COUNT Number of HDMI device attached to repeater
	 * - see HDCP spec */
	down_stream_devices = bstatus & 0x7F;
	if (down_stream_devices == 0x0) {
		/* There isn't any devices attaced to the Repeater */
		DEV_ERR("%s: there isn't any devices attached to the "
		    "Repeater\n", __func__);
		ret = -EINVAL;
		goto error;
	}
	/*
	 * HDCP Compliance 1B-05:
	 * Check if no. of devices connected to repeater
	 * exceed max_devices_connected from bit 7 of Bstatus.
	 */
	max_devs_exceeded = (bstatus & 0x80) >> 7;
	if (max_devs_exceeded == 0x01) {
		DEV_ERR("%s: Number of devs connected to repeater "
		    "exceeds max_devs\n", __func__);
		ret = -EINVAL;
		goto hdcp_error;
	}
	/*
	 * HDCP Compliance 1B-06:
	 * Check if no. of cascade connected to repeater
	 * exceed max_cascade_connected from bit 11 of Bstatus.
	 */
	max_cascade_exceeded = (bstatus & 0x800) >> 11;
	if (max_cascade_exceeded == 0x01) {
		DEV_ERR("%s: Number of cascade connected to repeater "
		    "exceeds max_cascade\n", __func__);
		ret = -EINVAL;
		goto hdcp_error;
	}
	/* Read KSV FIFO over DDC
	 * Key Slection vector FIFO
	 * Used to pull downstream KSVs from HDCP Repeaters.
	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
	 *   auto incrementing access.
	 * All bytes read as 0x00 for HDCP Receivers that are not
	 *   HDCP Repeaters (REPEATER == 0). */
	ksv_bytes = 5 * down_stream_devices;
	/* Reading KSV FIFO / KSV FIFO */
	ksv_done = FALSE;
	/* NOTE(review): the read happens once before the loop; the loop
	 * below only retries the *wait* (25ms sleeps, up to 20 times) on the
	 * same `ret` — it never re-issues the DDC read.  Confirm intended. */
	ret = hdmi_msm_ddc_read(0x74, 0x43, kvs_fifo, ksv_bytes, 5,
	"KSV FIFO", TRUE);
	do {
		if (ret) {
			DEV_ERR("%s(%d): Read KSV FIFO failed",
			    __func__, __LINE__);
			/*
			 * HDCP Compliace Test case 1B-01:
			 * Wait here until all the ksv bytes have been
			 * read from the KSV FIFO register.
			 */
			msleep(25);
		} else {
			ksv_done = TRUE;
		}
		cnt++;
	} while (!ksv_done && cnt != 20);
	if (ksv_done == FALSE)
		goto error;
	ret = hdmi_msm_transfer_v_h();
	if (ret)
		goto error;
	/* Next: Write KSV FIFO to HDCP_SHA_DATA.
	 * This is done 1 byte at time starting with the LSB.
	 * On the very last byte write,
	 * the HDCP_SHA_DATA_DONE bit[0]
	 */
	/* 0x023C HDCP_SHA_CTRL
	   [0] RESET	[0] Enable, [1] Reset
	   [4] SELECT	[0] DIGA_HDCP, [1] DIGB_HDCP */
	/* reset SHA engine */
	HDMI_OUTP(0x023C, 1);
	/* enable SHA engine, SEL=DIGA_HDCP */
	HDMI_OUTP(0x023C, 0);
	for (i = 0; i < ksv_bytes - 1; i++) {
		/* Write KSV byte and do not set DONE bit[0] */
		HDMI_OUTP_ND(0x0244, kvs_fifo[i] << 16);
		/* Once 64 bytes have been written, we need to poll for
		 * HDCP_SHA_BLOCK_DONE before writing any further
		 */
		if (i && !((i+1)%64)) {
			timeout_count = 100;
			while (!(HDMI_INP_ND(0x0240) & 0x1)
			    && (--timeout_count)) {
				DEV_DBG("HDCP Auth Part II: Waiting for the "
				    "computation of the current 64 byte to "
				    "complete. HDCP_SHA_STATUS=%08x. "
				    "timeout_count=%d\n",
				     HDMI_INP_ND(0x0240), timeout_count);
				msleep(20);
			}
			if (!timeout_count) {
				ret = -ETIMEDOUT;
				DEV_ERR("%s(%d): timedout", __func__, __LINE__);
				goto error;
			}
		}
	}
	/* Write l to DONE bit[0] */
	HDMI_OUTP_ND(0x0244, (kvs_fifo[ksv_bytes - 1] << 16) | 0x1);
	/* 0x0240 HDCP_SHA_STATUS
	   [4] COMP_DONE */
	/* Now wait for HDCP_SHA_COMP_DONE */
	timeout_count = 100;
	while ((0x10 != (HDMI_INP_ND(0x0240) & 0xFFFFFF10)) && --timeout_count)
		msleep(20);
	if (!timeout_count) {
		ret = -ETIMEDOUT;
		DEV_ERR("%s(%d): timedout", __func__, __LINE__);
		goto error;
	}
	/* 0x011C HDCP_LINK0_STATUS
	   [20] V_MATCHES */
	timeout_count = 100;
	while (((HDMI_INP_ND(0x011C) & (1 << 20)) != (1 << 20))
	    && --timeout_count) {
		msleep(20);
	}
	if (!timeout_count) {
		ret = -ETIMEDOUT;
		DEV_ERR("%s(%d): timedout", __func__, __LINE__);
		goto error;
	}
	DEV_INFO("HDCP: authentication part II, successful\n");
hdcp_error:
error:
	return ret;
}
/*
 * hdcp_authentication_part3() - verify the link stays authenticated.
 *
 * Samples HDCP_LINK0_STATUS 3000 times; every sample must show the fully
 * authenticated pattern (keys valid, R0 computation done, RI match,
 * AUTH_SUCCESS — plus V match when a repeater was found).  Any deviation
 * fails authentication.
 */
static int hdcp_authentication_part3(uint32 found_repeater)
{
	int ret = 0;
	int poll;

	if (!hdmi_msm_state->hdcp_enable) {
		DEV_DBG("%s: HDCP not enabled\n", __func__);
		return 0;
	}
	/* 0x011C HDCP_LINK0_STATUS
	   [30:28]	KEYS_STATE = 3 = "Valid"
	   [24]	RO_COMPUTATION_DONE	[0] Not Done, [1] Done
	   [20] V_MATCHES		[0] Mismtach, [1] Match
	   [12] RI_MATCHES		[0] Mismatch, [1] Match
	   [0] AUTH_SUCCESS */
	for (poll = 3000; poll > 0; poll--) {
		if (HDMI_INP_ND(0x011C) != (0x31001001 |
		    (found_repeater << 20))) {
			DEV_ERR("HDCP: autentication part III, FAILED, "
			    "Link Status=%08x\n", HDMI_INP(0x011C));
			ret = -EINVAL;
			goto error;
		}
	}
	DEV_INFO("HDCP: authentication part III, successful\n");
error:
	return ret;
}
/*
 * Run the full HDCP authentication sequence (parts I, II, III) and publish
 * the result.  On success the display is un-blanked, HDCP_STATE=PASS is
 * sent via uevent and (for non-DVI sinks) HDMI audio is switched on.
 * On failure either deauthentication (if HPD changed mid-auth) or a
 * re-authentication work item is scheduled.
 */
static void hdmi_msm_hdcp_enable(void)
{
	int ret = 0;
	uint8 bcaps;
	uint32 found_repeater = 0x0;
	char *envp[2];
	if (!hdmi_msm_state->hdcp_enable) {
		DEV_INFO("%s: HDCP NOT ENABLED\n", __func__);
		return;
	}
	/* Mark authentication in progress so other paths can back off. */
	mutex_lock(&hdmi_msm_state_mutex);
	hdmi_msm_state->hdcp_activating = TRUE;
	mutex_unlock(&hdmi_msm_state_mutex);
	/* Blank the screen while the link is unprotected. */
	fill_black_screen();
	mutex_lock(&hdcp_auth_state_mutex);
	/* This flag prevents other threads from re-authenticating
	 * after we've just authenticated (i.e., finished part3)
	 * We probably need to protect this in a mutex lock */
	hdmi_msm_state->full_auth_done = FALSE;
	mutex_unlock(&hdcp_auth_state_mutex);
	/* Disable HDCP before we start part1 */
	HDMI_OUTP(0x0110, 0x0);
	/* PART I Authentication*/
	ret = hdcp_authentication_part1();
	if (ret)
		goto error;
	/* PART II Authentication*/
	/* read Bcaps at 0x40 in HDCP Port */
	ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 5, "Bcaps", FALSE);
	if (ret) {
		DEV_ERR("%s(%d): Read Bcaps failed\n", __func__, __LINE__);
		goto error;
	}
	DEV_DBG("HDCP: Bcaps=0x%02x (%s)\n", bcaps,
		(bcaps & BIT(6)) ? "repeater" : "no repeater");
	/* if REPEATER (Bit 6), perform Part2 Authentication */
	if (bcaps & BIT(6)) {
		found_repeater = 0x1;
		ret = hdcp_authentication_part2();
		if (ret)
			goto error;
	} else
		DEV_INFO("HDCP: authentication part II skipped, no repeater\n");
	/* PART III Authentication*/
	ret = hdcp_authentication_part3(found_repeater);
	if (ret)
		goto error;
	/* Authenticated: restore video and publish state. */
	unfill_black_screen();
	mutex_lock(&hdmi_msm_state_mutex);
	hdmi_msm_state->hdcp_activating = FALSE;
	mutex_unlock(&hdmi_msm_state_mutex);
	mutex_lock(&hdcp_auth_state_mutex);
	/*
	 * This flag prevents other threads from re-authenticating
	 * after we've just authenticated (i.e., finished part3)
	 */
	hdmi_msm_state->full_auth_done = TRUE;
	external_common_state->hdcp_active = TRUE;
	mutex_unlock(&hdcp_auth_state_mutex);
	/* Audio notification is deferred until now when HDCP is enabled. */
	if (!hdmi_msm_is_dvi_mode()) {
		DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
		envp[0] = "HDCP_STATE=PASS";
		envp[1] = NULL;
		kobject_uevent_env(external_common_state->uevent_kobj,
		    KOBJ_CHANGE, envp);
		SWITCH_SET_HDMI_AUDIO(1, 0);
	}
	return;
error:
	/* If HPD toggled during authentication, tear down instead of retrying. */
	if (hdmi_msm_state->hpd_during_auth) {
		DEV_WARN("Calling Deauthentication: HPD occured during "
			 "authentication  from [%s]\n", __func__);
		hdcp_deauthenticate();
		mutex_lock(&hdcp_auth_state_mutex);
		hdmi_msm_state->hpd_during_auth = FALSE;
		mutex_unlock(&hdcp_auth_state_mutex);
	} else {
		DEV_WARN("[DEV_DBG]: Calling reauth from [%s]\n", __func__);
		if (hdmi_msm_state->panel_power_on)
			queue_work(hdmi_work_queue,
			    &hdmi_msm_state->hdcp_reauth_work);
	}
	mutex_lock(&hdmi_msm_state_mutex);
	hdmi_msm_state->hdcp_activating = FALSE;
	mutex_unlock(&hdmi_msm_state_mutex);
}
/*
 * Program the HDMI core video timing registers (totals, active window,
 * field-2 values for interlaced modes) for the given video format, then
 * configure frame control.  No-op with an error log if the format has no
 * timing entry.
 */
static void hdmi_msm_video_setup(int video_format)
{
	uint32 total_v = 0;
	uint32 total_h = 0;
	uint32 start_h = 0;
	uint32 end_h   = 0;
	uint32 start_v = 0;
	uint32 end_v   = 0;
	const struct hdmi_disp_mode_timing_type *timing =
		hdmi_common_get_supported_mode(video_format);

	/* timing register setup */
	if (timing == NULL) {
		DEV_ERR("video format not supported: %d\n", video_format);
		return;
	}

	/* Hsync Total and Vsync Total */
	total_h = timing->active_h + timing->front_porch_h
		+ timing->back_porch_h + timing->pulse_width_h - 1;
	total_v = timing->active_v + timing->front_porch_v
		+ timing->back_porch_v + timing->pulse_width_v - 1;
	/* 0x02C0 HDMI_TOTAL
	   [27:16] V_TOTAL Vertical Total
	   [11:0]  H_TOTAL Horizontal Total */
	HDMI_OUTP(0x02C0, ((total_v << 16) & 0x0FFF0000)
		| ((total_h << 0) & 0x00000FFF));

	/* Hsync Start and Hsync End */
	start_h = timing->back_porch_h + timing->pulse_width_h;
	end_h   = (total_h + 1) - timing->front_porch_h;
	/* 0x02B4 HDMI_ACTIVE_H
	   [27:16] END Horizontal end
	   [11:0]  START Horizontal start */
	HDMI_OUTP(0x02B4, ((end_h << 16) & 0x0FFF0000)
		| ((start_h << 0) & 0x00000FFF));

	start_v = timing->back_porch_v + timing->pulse_width_v - 1;
	end_v   = total_v - timing->front_porch_v;
	/* 0x02B8 HDMI_ACTIVE_V
	   [27:16] END Vertical end
	   [11:0]  START Vertical start */
	HDMI_OUTP(0x02B8, ((end_v << 16) & 0x0FFF0000)
		| ((start_v << 0) & 0x00000FFF));

	if (timing->interlaced) {
		/* Field 2 uses the field-1 values shifted by one line. */
		/* 0x02C4 HDMI_V_TOTAL_F2
		   [11:0] V_TOTAL_F2 Vertical total for field2 */
		HDMI_OUTP(0x02C4, ((total_v + 1) << 0) & 0x00000FFF);
		/* 0x02BC HDMI_ACTIVE_V_F2
		   [27:16] END_F2 Vertical end for field2
		   [11:0]  START_F2 Vertical start for Field2 */
		HDMI_OUTP(0x02BC,
			(((start_v + 1) << 0) & 0x00000FFF)
			| (((end_v + 1) << 16) & 0x0FFF0000));
	} else {
		/* Progressive: clear the field-2 registers. */
		/* HDMI_V_TOTAL_F2 */
		HDMI_OUTP(0x02C4, 0);
		/* HDMI_ACTIVE_V_F2 */
		HDMI_OUTP(0x02BC, 0);
	}

	hdmi_frame_ctrl_cfg(timing);
}
/* One N/CTS pair for HDMI Audio Clock Regeneration (ACR). */
struct hdmi_msm_audio_acr {
	uint32 n;	/* N parameter for clock regeneration */
	uint32 cts;	/* CTS parameter for clock regeneration */
};

/* N/CTS pairs for every supported audio sample rate at one pixel clock. */
struct hdmi_msm_audio_arcs {
	uint32 pclk;	/* pixel clock in kHz this row applies to */
	struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
};

/* Convenience initializer for one hdmi_msm_audio_arcs row. */
#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { pclk, __VA_ARGS__ }

/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
/*
 * ACR lookup table, indexed by sample-rate enum within each row:
 * 32k, 44.1k, 48k, 88.2k, 96k, 176.4k, 192k (order per
 * MSM_HDMI_SAMPLE_RATE_*).  Row is selected by matching pclk against
 * timing->pixel_freq in hdmi_msm_audio_acr_setup().
 */
static const struct hdmi_msm_audio_arcs hdmi_msm_audio_acr_lut[] = {
	/* 25.200MHz */
	HDMI_MSM_AUDIO_ARCS(25200, {
		{4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
		{12288, 25200}, {25088, 28000}, {24576, 25200} }),
	/* 27.000MHz */
	HDMI_MSM_AUDIO_ARCS(27000, {
		{4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
		{12288, 27000}, {25088, 30000}, {24576, 27000} }),
	/* 27.027MHz */
	/* NOTE(review): pclk key is 27030 (kHz, rounded) while the CTS
	 * values use the exact 27027 rate — presumably matches the value
	 * stored in timing->pixel_freq; verify against the timing tables. */
	HDMI_MSM_AUDIO_ARCS(27030, {
		{4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
		{12288, 27027}, {25088, 30030}, {24576, 27027} }),
	/* 74.250MHz */
	HDMI_MSM_AUDIO_ARCS(74250, {
		{4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
		{12288, 74250}, {25088, 82500}, {24576, 74250} }),
	/* 148.500MHz */
	HDMI_MSM_AUDIO_ARCS(148500, {
		{4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
		{12288, 148500}, {25088, 165000}, {24576, 148500} }),
};
/*
 * Program (or disable) HDMI Audio Clock Regeneration.  Looks up the N/CTS
 * pair for the current pixel clock and sample rate, applies the N divider/
 * multiplier trick for high sample rates, writes the rate-specific ACR
 * registers and the packet layout, then enables SEND|CONT in
 * HDMI_ACR_PKT_CTRL.  When @enabled is false only SEND|CONT are cleared.
 */
static void hdmi_msm_audio_acr_setup(boolean enabled, int video_format,
	int audio_sample_rate, int num_of_channels)
{
	/* Read first before writing */
	/* HDMI_ACR_PKT_CTRL[0x0024] */
	uint32 acr_pck_ctrl_reg = HDMI_INP(0x0024);

	if (enabled) {
		const struct hdmi_disp_mode_timing_type *timing =
			hdmi_common_get_supported_mode(video_format);
		const struct hdmi_msm_audio_arcs *audio_arc =
			&hdmi_msm_audio_acr_lut[0];
		const int lut_size = sizeof(hdmi_msm_audio_acr_lut)
			/sizeof(*hdmi_msm_audio_acr_lut);
		uint32 i, n, cts, layout, multiplier, aud_pck_ctrl_2_reg;

		if (timing == NULL) {
			DEV_WARN("%s: video format %d not supported\n",
				__func__, video_format);
			return;
		}

		/* Find the LUT row matching this pixel clock.  The unusual
		 * increment-in-update loop leaves i == lut_size (and
		 * audio_arc one-past-the-end, which is never dereferenced)
		 * when there is no match. */
		for (i = 0; i < lut_size;
			audio_arc = &hdmi_msm_audio_acr_lut[++i]) {
			if (audio_arc->pclk == timing->pixel_freq)
				break;
		}
		if (i >= lut_size) {
			DEV_WARN("%s: pixel clock %d not supported\n", __func__,
				timing->pixel_freq);
			return;
		}

		n = audio_arc->lut[audio_sample_rate].n;
		cts = audio_arc->lut[audio_sample_rate].cts;
		/* layout 0 = 2-channel, 1 = multi-channel sample packets */
		layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1;

		/* High sample rates transmit a reduced N with an N_MULTIPLE
		 * field instead of the full value. */
		if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio_sample_rate) ||
		    (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio_sample_rate)) {
			multiplier = 4;
			n >>= 2; /* divide N by 4 and use multiplier */
		} else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio_sample_rate) ||
			  (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio_sample_rate)) {
			multiplier = 2;
			n >>= 1; /* divide N by 2 and use multiplier */
		} else {
			multiplier = 1;
		}
		DEV_DBG("%s: n=%u, cts=%u, layout=%u\n", __func__, n, cts,
			layout);

		/* AUDIO_PRIORITY | SOURCE */
		acr_pck_ctrl_reg |= 0x80000100;
		/* N_MULTIPLE(multiplier) */
		acr_pck_ctrl_reg |= (multiplier & 7) << 16;

		/* Each sample-rate family (32k/44.1k/48k and multiples) has
		 * its own CTS/N register pair and SELECT code. */
		if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio_sample_rate) ||
		    (MSM_HDMI_SAMPLE_RATE_96KHZ == audio_sample_rate) ||
		    (MSM_HDMI_SAMPLE_RATE_192KHZ == audio_sample_rate)) {
			/* SELECT(3) */
			acr_pck_ctrl_reg |= 3 << 4;
			/* CTS_48 */
			cts <<= 12;

			/* CTS: need to determine how many fractional bits */
			/* HDMI_ACR_48_0 */
			HDMI_OUTP(0x00D4, cts);
			/* N */
			/* HDMI_ACR_48_1 */
			HDMI_OUTP(0x00D8, n);
		} else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio_sample_rate)
			   || (MSM_HDMI_SAMPLE_RATE_88_2KHZ ==
			       audio_sample_rate)
			   || (MSM_HDMI_SAMPLE_RATE_176_4KHZ ==
			       audio_sample_rate)) {
			/* SELECT(2) */
			acr_pck_ctrl_reg |= 2 << 4;
			/* CTS_44 */
			cts <<= 12;

			/* CTS: need to determine how many fractional bits */
			/* HDMI_ACR_44_0 */
			HDMI_OUTP(0x00CC, cts);
			/* N */
			/* HDMI_ACR_44_1 */
			HDMI_OUTP(0x00D0, n);
		} else {	/* default to 32k */
			/* SELECT(1) */
			acr_pck_ctrl_reg |= 1 << 4;
			/* CTS_32 */
			cts <<= 12;

			/* CTS: need to determine how many fractional bits */
			/* HDMI_ACR_32_0 */
			HDMI_OUTP(0x00C4, cts);
			/* N */
			/* HDMI_ACR_32_1 */
			HDMI_OUTP(0x00C8, n);
		}
		/* Payload layout depends on number of audio channels */
		/* LAYOUT_SEL(layout) */
		aud_pck_ctrl_2_reg = 1 | (layout << 1);
		/* override | layout */
		/* HDMI_AUDIO_PKT_CTRL2[0x00044] */
		HDMI_OUTP(0x00044, aud_pck_ctrl_2_reg);

		/* SEND | CONT */
		acr_pck_ctrl_reg |= 0x00000003;
	} else {
		/* ~(SEND | CONT) */
		acr_pck_ctrl_reg &= ~0x00000003;
	}
	/* HDMI_ACR_PKT_CTRL[0x0024] */
	HDMI_OUTP(0x0024, acr_pck_ctrl_reg);
}
/*
 * Write @data to the HDMI register at @offset and read it back, retrying
 * up to 10 extra times until the readback matches; logs an error if it
 * never does.  Intended for registers where writes can be dropped.
 */
static void hdmi_msm_outpdw_chk(uint32 offset, uint32 data)
{
	uint32 check, i = 0;

#ifdef DEBUG
	/* NOTE(review): debug builds perform one extra unchecked write
	 * before the verified loop — presumably to exercise the HDMI_OUTP
	 * logging path; confirm this is intentional. */
	HDMI_OUTP(offset, data);
#endif
	do {
		outpdw(MSM_HDMI_BASE+offset, data);
		check = inpdw(MSM_HDMI_BASE+offset);
	} while (check != data && i++ < 10);

	if (check != data)
		DEV_ERR("%s: failed addr=%08x, data=%x, check=%x",
			__func__, offset, data, check);
}
/*
 * Read-modify-write: OR @data into the HDMI register at @offset using the
 * verified write helper.
 */
static void hdmi_msm_rmw32or(uint32 offset, uint32 data)
{
	uint32 reg_data;
	/* NOTE(review): the register is read twice back-to-back and only the
	 * second value is used.  This may be a deliberate dummy read to
	 * flush a posted write on this SoC, or a copy-paste duplicate —
	 * confirm against the hardware programming guide before removing. */
	reg_data = inpdw(MSM_HDMI_BASE+offset);
	reg_data = inpdw(MSM_HDMI_BASE+offset);
	hdmi_msm_outpdw_chk(offset, reg_data | data);
}
/* HDMI_AUDIO_CFG register: engine enable (bit 0) and FIFO watermark
 * field (bits [7:4]). */
#define HDMI_AUDIO_CFG 0x01D0
#define HDMI_AUDIO_ENGINE_ENABLE 1
#define HDMI_AUDIO_FIFO_MASK 0x000000F0
#define HDMI_AUDIO_FIFO_WATERMARK_SHIFT 4
#define HDMI_AUDIO_FIFO_MAX_WATER_MARK 8
/*
 * Enable or disable the HDMI audio engine.  When enabling, the FIFO
 * watermark is programmed as well (must not exceed
 * HDMI_AUDIO_FIFO_MAX_WATER_MARK).  Returns 0 on success, -EINVAL for an
 * out-of-range watermark.
 */
int hdmi_audio_enable(bool on , u32 fifo_water_mark)
{
	u32 cfg;

	/* Read-modify-write: preserve unrelated bits of HDMI_AUDIO_CFG. */
	cfg = HDMI_INP(HDMI_AUDIO_CFG);

	if (!on) {
		cfg &= ~(HDMI_AUDIO_ENGINE_ENABLE);
	} else {
		if (fifo_water_mark > HDMI_AUDIO_FIFO_MAX_WATER_MARK) {
			pr_err("%s : HDMI audio fifo water mark can not be more"
				" than %u\n", __func__,
				HDMI_AUDIO_FIFO_MAX_WATER_MARK);
			return -EINVAL;
		}

		/*
		 * Enable HDMI Audio engine.
		 * MUST be enabled after Audio DMA is enabled.
		 */
		cfg &= ~(HDMI_AUDIO_FIFO_MASK);
		cfg |= (HDMI_AUDIO_ENGINE_ENABLE |
			(fifo_water_mark << HDMI_AUDIO_FIFO_WATERMARK_SHIFT));
	}

	HDMI_OUTP(HDMI_AUDIO_CFG, cfg);
	mb();	/* ensure the register write lands before reporting */

	pr_info("%s :HDMI_AUDIO_CFG 0x%08x\n", __func__,
		HDMI_INP(HDMI_AUDIO_CFG));

	return 0;
}
EXPORT_SYMBOL(hdmi_audio_enable);
/* HDMI_AUDIO_PKT_CTRL register: audio sample packet transmission enable. */
#define HDMI_AUDIO_PKT_CTRL 0x0020
#define HDMI_AUDIO_SAMPLE_SEND_ENABLE 1

/*
 * Turn transmission of HDMI audio sample packets on or off.
 * Always returns 0.
 */
int hdmi_audio_packet_enable(bool on)
{
	u32 pkt_ctrl;

	/* Read-modify-write so only the SEND bit changes. */
	pkt_ctrl = HDMI_INP(HDMI_AUDIO_PKT_CTRL);
	if (!on)
		pkt_ctrl &= ~(HDMI_AUDIO_SAMPLE_SEND_ENABLE);
	else
		pkt_ctrl |= HDMI_AUDIO_SAMPLE_SEND_ENABLE;
	HDMI_OUTP(HDMI_AUDIO_PKT_CTRL, pkt_ctrl);
	mb();	/* flush the write before logging the readback */

	pr_info("%s : HDMI_AUDIO_PKT_CTRL 0x%08x\n", __func__,
		HDMI_INP(HDMI_AUDIO_PKT_CTRL));
	return 0;
}
EXPORT_SYMBOL(hdmi_audio_packet_enable);
/* TO-DO: return -EINVAL when num_of_channels and channel_allocation
 * does not match CEA 861-D spec.
*/
/*
 * Build and program the HDMI Audio InfoFrame (channel count, speaker
 * allocation, level shift, down-mix inhibit) and enable its transmission;
 * when @enabled is false the InfoFrame send/cont flags are cleared.
 * The packet checksum is computed per the HDMI spec (sum of all packet
 * bytes == 0 mod 256).  Returns 0 on success, -EINVAL for an unsupported
 * channel count.  Note: @channel_allocation is overridden internally for
 * each supported channel count (see TO-DO above).
 */
int hdmi_msm_audio_info_setup(bool enabled, u32 num_of_channels,
	u32 channel_allocation, u32 level_shift, bool down_mix)
{
	uint32 channel_count = 1;	/* Default to 2 channels
					   -> See Table 17 in CEA-D spec */
	uint32 check_sum, audio_info_0_reg, audio_info_1_reg;
	uint32 audio_info_ctrl_reg;
	u32 aud_pck_ctrl_2_reg;
	u32 layout;

	/* layout 0 = 2-channel packets, 1 = multi-channel packets */
	layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1;
	aud_pck_ctrl_2_reg = 1 | (layout << 1);
	HDMI_OUTP(0x00044, aud_pck_ctrl_2_reg);

	/* Please see table 20 Audio InfoFrame in HDMI spec
	   FL  = front left
	   FC  = front Center
	   FR  = front right
	   FLC = front left center
	   FRC = front right center
	   RL  = rear left
	   RC  = rear center
	   RR  = rear right
	   RLC = rear left center
	   RRC = rear right center
	   LFE = low frequency effect
	 */

	/* Read first then write because it is bundled with other controls */
	/* HDMI_INFOFRAME_CTRL0[0x002C] */
	audio_info_ctrl_reg = HDMI_INP(0x002C);

	if (enabled) {
		switch (num_of_channels) {
		case MSM_HDMI_AUDIO_CHANNEL_2:
			channel_allocation = 0;	/* Default to FR,FL */
			break;
		case MSM_HDMI_AUDIO_CHANNEL_4:
			channel_count = 3;
			/* FC,LFE,FR,FL */
			channel_allocation = 0x3;
			break;
		case MSM_HDMI_AUDIO_CHANNEL_6:
			channel_count = 5;
			/* RR,RL,FC,LFE,FR,FL */
			channel_allocation = 0xB;
			break;
		case MSM_HDMI_AUDIO_CHANNEL_8:
			channel_count = 7;
			/* FRC,FLC,RR,RL,FC,LFE,FR,FL */
			channel_allocation = 0x1f;
			break;
		default:
			pr_err("%s(): Unsupported num_of_channels = %u\n",
					__func__, num_of_channels);
			return -EINVAL;
			break;
		}

		/* Program the Channel-Speaker allocation */
		audio_info_1_reg = 0;
		/* CA(channel_allocation) */
		audio_info_1_reg |= channel_allocation & 0xff;
		/* Program the Level shifter */
		/* LSV(level_shift) */
		audio_info_1_reg |= (level_shift << 11) & 0x00007800;
		/* Program the Down-mix Inhibit Flag */
		/* DM_INH(down_mix) */
		audio_info_1_reg |= (down_mix << 15) & 0x00008000;

		/* HDMI_AUDIO_INFO1[0x00E8] */
		HDMI_OUTP(0x00E8, audio_info_1_reg);

		/* Calculate CheckSum
		   Sum of all the bytes in the Audio Info Packet bytes
		   (See table 8.4 in HDMI spec) */
		check_sum = 0;
		/* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_TYPE[0x84] */
		check_sum += 0x84;
		/* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_VERSION[0x01] */
		check_sum += 1;
		/* HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH[0x0A] */
		check_sum += 0x0A;
		check_sum += channel_count;
		check_sum += channel_allocation;
		/* See Table 8.5 in HDMI spec */
		check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
		check_sum &= 0xFF;
		check_sum = (uint8) (256 - check_sum);

		audio_info_0_reg = 0;
		/* CHECKSUM(check_sum) */
		audio_info_0_reg |= check_sum & 0xff;
		/* CC(channel_count) */
		audio_info_0_reg |= (channel_count << 8) & 0x00000700;

		/* HDMI_AUDIO_INFO0[0x00E4] */
		HDMI_OUTP(0x00E4, audio_info_0_reg);

		/* Set these flags */
		/* AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT
		   | AUDIO_INFO_SEND */
		audio_info_ctrl_reg |= 0x000000F0;
	} else {
		/* Clear these flags */
		/* ~(AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT
		   | AUDIO_INFO_SEND) */
		audio_info_ctrl_reg &= ~0x000000F0;
	}
	/* HDMI_INFOFRAME_CTRL0[0x002C] */
	HDMI_OUTP(0x002C, audio_info_ctrl_reg);

	hdmi_msm_dump_regs("HDMI-AUDIO-ON: ");

	return 0;
}
EXPORT_SYMBOL(hdmi_msm_audio_info_setup);
/*
 * Program the General Control packet with the requested AV-mute state and
 * enable GC packet transmission on every frame.
 */
static void hdmi_msm_en_gc_packet(boolean av_mute_is_requested)
{
	uint32 gc_val = 0;

	if (av_mute_is_requested)
		gc_val = 1;

	/* HDMI_GC[0x0040] */
	HDMI_OUTP(0x0040, gc_val);

	/* GC packet enable (every frame) */
	/* HDMI_VBI_PKT_CTRL[0x0028] */
	hdmi_msm_rmw32or(0x0028, 3 << 4);
}
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_ISRC_ACP_SUPPORT
/*
 * Program the ISRC1/ISRC2 packet registers with placeholder data and
 * enable continuous ISRC transmission.  @isrc_is_continued sets the
 * ISRC_CONTINUE bit in the ISRC1 status byte.
 */
static void hdmi_msm_en_isrc_packet(boolean isrc_is_continued)
{
	/* NOTE(review): the 32-byte char array is accessed through a
	 * uint32 pointer; this assumes 4-byte alignment of the array and
	 * relies on native (little-endian) byte order for the register
	 * payload — confirm, or switch to get_unaligned_le32(). */
	static const char isrc_psuedo_data[] =
					"ISRC1:0123456789isrc2=ABCDEFGHIJ";
	const uint32 *isrc_data = (const uint32 *) isrc_psuedo_data;

	/* ISRC_STATUS =0b010 | ISRC_CONTINUE | ISRC_VALID */
	/* HDMI_ISRC1_0[0x00048] */
	HDMI_OUTP(0x00048, 2 | (isrc_is_continued ? 1 : 0) << 6 | 0 << 7);

	/* HDMI_ISRC1_1[0x004C] */
	HDMI_OUTP(0x004C, *isrc_data++);
	/* HDMI_ISRC1_2[0x0050] */
	HDMI_OUTP(0x0050, *isrc_data++);
	/* HDMI_ISRC1_3[0x0054] */
	HDMI_OUTP(0x0054, *isrc_data++);
	/* HDMI_ISRC1_4[0x0058] */
	HDMI_OUTP(0x0058, *isrc_data++);

	/* HDMI_ISRC2_0[0x005C] */
	HDMI_OUTP(0x005C, *isrc_data++);
	/* HDMI_ISRC2_1[0x0060] */
	HDMI_OUTP(0x0060, *isrc_data++);
	/* HDMI_ISRC2_2[0x0064] */
	HDMI_OUTP(0x0064, *isrc_data++);
	/* HDMI_ISRC2_3[0x0068] */
	HDMI_OUTP(0x0068, *isrc_data);

	/* HDMI_VBI_PKT_CTRL[0x0028] */
	/* ISRC Send + Continuous */
	hdmi_msm_rmw32or(0x0028, 3 << 8);
}
#else
/* Stub: ISRC packets disabled until end-to-end support exists
 * (CONFIG_FB_MSM_HDMI_MSM_PANEL_ISRC_ACP_SUPPORT not set). */
static void hdmi_msm_en_isrc_packet(boolean isrc_is_continued)
{
	/*
	 * Until end-to-end support for various audio packets
	 */
}
#endif
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_ISRC_ACP_SUPPORT
/*
 * Program the Audio Content Protection (ACP) packet with @byte1 as its
 * first data byte and enable software-sourced ACP transmission.
 */
static void hdmi_msm_en_acp_packet(uint32 byte1)
{
	/* HDMI_ACP[0x003C] */
	HDMI_OUTP(0x003C, 2 | 1 << 8 | byte1 << 16);

	/* HDMI_VBI_PKT_CTRL[0x0028] */
	/* ACP send, s/w source */
	hdmi_msm_rmw32or(0x0028, 3 << 12);
}
#else
/* Stub: ACP packets disabled until end-to-end support exists
 * (CONFIG_FB_MSM_HDMI_MSM_PANEL_ISRC_ACP_SUPPORT not set). */
static void hdmi_msm_en_acp_packet(uint32 byte1)
{
	/*
	 * Until end-to-end support for various audio packets
	 */
}
#endif
/* Return the currently configured HDMI audio sample-rate enum value. */
int hdmi_msm_audio_get_sample_rate(void)
{
	return msm_hdmi_sample_rate;
}
EXPORT_SYMBOL(hdmi_msm_audio_get_sample_rate);
/*
 * Change the HDMI audio sample rate and force the link to pick it up:
 * with HDCP enabled, deauthenticate (re-auth path re-inits the core);
 * otherwise restart the core directly via hdmi_msm_turn_on().
 */
void hdmi_msm_audio_sample_rate_reset(int rate)
{
	msm_hdmi_sample_rate = rate;

	if (hdmi_msm_state->hdcp_enable)
		hdcp_deauthenticate();
	else
		hdmi_msm_turn_on();
}
EXPORT_SYMBOL(hdmi_msm_audio_sample_rate_reset);
/*
 * Full HDMI audio bring-up for the current resolution and sample rate:
 * GC/ISRC/ACP packets, Audio Clock Regeneration, Audio InfoFrame, and
 * the FIFO-underflow/sample-drop interrupt bits.
 */
static void hdmi_msm_audio_setup(void)
{
	const int channels = MSM_HDMI_AUDIO_CHANNEL_2;

	/* (0) for clr_avmute, (1) for set_avmute */
	hdmi_msm_en_gc_packet(0);
	/* (0) for isrc1 only, (1) for isrc1 and isrc2 */
	hdmi_msm_en_isrc_packet(1);
	/* arbitrary bit pattern for byte1 */
	hdmi_msm_en_acp_packet(0x5a);
	DEV_DBG("Not setting ACP, ISRC1, ISRC2 packets\n");

	hdmi_msm_audio_acr_setup(TRUE,
		external_common_state->video_resolution,
		msm_hdmi_sample_rate, channels);
	hdmi_msm_audio_info_setup(TRUE, channels, 0, 0, FALSE);

	/* Turn on Audio FIFO and SAM DROP ISR */
	HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) | BIT(1) | BIT(3));
	DEV_INFO("HDMI Audio: Enabled\n");
}
/*
 * Disable HDMI audio.  Waits up to 5 seconds (50 x 100ms) for the audio
 * engine enable bit in HDMI_AUDIO_CFG to clear, nudging the audio switch
 * once per second, then tears down the Audio InfoFrame and ACR setup.
 * Always returns 0.
 */
static int hdmi_msm_audio_off(void)
{
	const int max_tries = 50;
	uint32 audio_cfg;
	int tries = 0;

	while (tries < max_tries) {
		audio_cfg = HDMI_INP_ND(0x01D0);
		if (!(audio_cfg & BIT(0)))
			break;	/* engine is off */
		DEV_DBG("%s: %d times: AUDIO CFG is %08xi\n", __func__,
			tries+1, audio_cfg);
		/* Once per second, re-assert the audio-off switch state. */
		if (!((tries+1) % 10)) {
			DEV_ERR("%s: audio still on after %d sec. try again\n",
				__func__, (tries+1)/10);
			SWITCH_SET_HDMI_AUDIO(0, 1);
		}
		msleep(100);
		tries++;
	}

	if (tries == max_tries)
		DEV_ERR("%s: Error: cannot turn off audio engine\n", __func__);

	hdmi_msm_audio_info_setup(FALSE, 0, 0, 0, FALSE);
	hdmi_msm_audio_acr_setup(FALSE, 0, 0, 0);
	DEV_INFO("HDMI Audio: Disabled\n");
	return 0;
}
/*
 * AVI InfoFrame data-byte lookup table: row index = data byte number
 * (DB1..DB13), column index = the mode number computed in
 * hdmi_msm_avi_info_frame() from the video resolution.
 */
static uint8 hdmi_msm_avi_iframe_lut[][16] = {
/*	480p60	480i60	576p50	576i50	720p60	 720p50	1080p60	1080i60	1080p50
	1080i50	1080p24	1080p30	1080p25	640x480p 480p60_16_9 576p50_4_3 */
	{0x10,	0x10,	0x10,	0x10,	0x10,	 0x10,	0x10,	0x10,	0x10,
	 0x10,	0x10,	0x10,	0x10,	0x10, 0x10, 0x10}, /*00*/
	{0x18,	0x18,	0x28,	0x28,	0x28,	 0x28,	0x28,	0x28,	0x28,
	 0x28,	0x28,	0x28,	0x28,	0x18, 0x28, 0x18}, /*01*/
	{0x00,	0x04,	0x04,	0x04,	0x04,	 0x04,	0x04,	0x04,	0x04,
	 0x04,	0x04,	0x04,	0x04,	0x88, 0x00, 0x04}, /*02*/
	{0x02,	0x06,	0x11,	0x15,	0x04,	 0x13,	0x10,	0x05,	0x1F,
	 0x14,	0x20,	0x22,	0x21,	0x01, 0x03, 0x11}, /*03*/
	{0x00,	0x01,	0x00,	0x01,	0x00,	 0x00,	0x00,	0x00,	0x00,
	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*04*/
	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*05*/
	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*06*/
	{0xE1,	0xE1,	0x41,	0x41,	0xD1,	 0xd1,	0x39,	0x39,	0x39,
	 0x39,	0x39,	0x39,	0x39,	0xe1, 0xE1, 0x41}, /*07*/
	{0x01,	0x01,	0x02,	0x02,	0x02,	 0x02,	0x04,	0x04,	0x04,
	 0x04,	0x04,	0x04,	0x04,	0x01, 0x01, 0x02}, /*08*/
	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*09*/
	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*10*/
	{0xD1,	0xD1,	0xD1,	0xD1,	0x01,	 0x01,	0x81,	0x81,	0x81,
	 0x81,	0x81,	0x81,	0x81,	0x81, 0xD1, 0xD1}, /*11*/
	{0x02,	0x02,	0x02,	0x02,	0x05,	 0x05,	0x07,	0x07,	0x07,
	 0x07,	0x07,	0x07,	0x07,	0x02, 0x02, 0x02}  /*12*/
};
/*
 * Build the AVI InfoFrame for the current video resolution from the
 * per-mode lookup table, apply the underscan bit from sink scan info,
 * compute the checksum, pack the bytes into the four AVI registers and
 * enable AVI InfoFrame transmission every frame.
 */
static void hdmi_msm_avi_info_frame(void)
{
	/* two header + length + 13 data */
	uint8 aviInfoFrame[16];
	uint8 checksum;
	uint32 sum;
	uint32 regVal;
	int i;
	int mode = 0;
	boolean use_ce_scan_info = TRUE;

	/* Map the resolution enum to a column of hdmi_msm_avi_iframe_lut. */
	switch (external_common_state->video_resolution) {
	case HDMI_VFRMT_720x480p60_4_3:
		mode = 0;
		break;
	case HDMI_VFRMT_720x480i60_16_9:
		mode = 1;
		break;
	case HDMI_VFRMT_720x576p50_16_9:
		mode = 2;
		break;
	case HDMI_VFRMT_720x576i50_16_9:
		mode = 3;
		break;
	case HDMI_VFRMT_1280x720p60_16_9:
		mode = 4;
		break;
	case HDMI_VFRMT_1280x720p50_16_9:
		mode = 5;
		break;
	case HDMI_VFRMT_1920x1080p60_16_9:
		mode = 6;
		break;
	case HDMI_VFRMT_1920x1080i60_16_9:
		mode = 7;
		break;
	case HDMI_VFRMT_1920x1080p50_16_9:
		mode = 8;
		break;
	case HDMI_VFRMT_1920x1080i50_16_9:
		mode = 9;
		break;
	case HDMI_VFRMT_1920x1080p24_16_9:
		mode = 10;
		break;
	case HDMI_VFRMT_1920x1080p30_16_9:
		mode = 11;
		break;
	case HDMI_VFRMT_1920x1080p25_16_9:
		mode = 12;
		break;
	case HDMI_VFRMT_640x480p60_4_3:
		mode = 13;
		break;
	case HDMI_VFRMT_720x480p60_16_9:
		mode = 14;
		break;
	case HDMI_VFRMT_720x576p50_4_3:
		mode = 15;
		break;
	default:
		DEV_INFO("%s: mode %d not supported\n", __func__,
			external_common_state->video_resolution);
		return;
	}

	/* InfoFrame Type = 82 */
	aviInfoFrame[0]  = 0x82;
	/* Version = 2 */
	aviInfoFrame[1]  = 2;
	/* Length of AVI InfoFrame = 13 */
	aviInfoFrame[2]  = 13;

	/* Data Byte 01: 0 Y1 Y0 A0 B1 B0 S1 S0 */
	aviInfoFrame[3]  = hdmi_msm_avi_iframe_lut[0][mode];

	/*
	 * If the sink specified support for both underscan/overscan
	 * then, by default, set the underscan bit.
	 * Only checking underscan support for preferred format and cea formats
	 */
	if ((external_common_state->video_resolution ==
			external_common_state->preferred_video_format)) {
		use_ce_scan_info = FALSE;
		switch (external_common_state->pt_scan_info) {
		case 0:
			/*
			 * Need to use the info specified for the corresponding
			 * IT or CE format
			 */
			DEV_DBG("%s: No underscan information specified for the"
				" preferred video format\n", __func__);
			use_ce_scan_info = TRUE;
			break;
		case 3:
			DEV_DBG("%s: Setting underscan bit for the preferred"
				" video format\n", __func__);
			aviInfoFrame[3] |= 0x02;
			break;
		default:
			DEV_DBG("%s: Underscan information not set for the"
				" preferred video format\n", __func__);
			break;
		}
	}

	if (use_ce_scan_info) {
		if (3 == external_common_state->ce_scan_info) {
			DEV_DBG("%s: Setting underscan bit for the CE video"
					" format\n", __func__);
			aviInfoFrame[3] |= 0x02;
		} else {
			DEV_DBG("%s: Not setting underscan bit for the CE video"
				" format\n", __func__);
		}
	}

	/* Data Byte 02: C1 C0 M1 M0 R3 R2 R1 R0 */
	aviInfoFrame[4]  = hdmi_msm_avi_iframe_lut[1][mode];
	/* Data Byte 03: ITC EC2 EC1 EC0 Q1 Q0 SC1 SC0 */
	aviInfoFrame[5]  = hdmi_msm_avi_iframe_lut[2][mode];
	/* Data Byte 04: 0 VIC6 VIC5 VIC4 VIC3 VIC2 VIC1 VIC0 */
	aviInfoFrame[6]  = hdmi_msm_avi_iframe_lut[3][mode];
	/* Data Byte 05: 0 0 0 0 PR3 PR2 PR1 PR0 */
	aviInfoFrame[7]  = hdmi_msm_avi_iframe_lut[4][mode];
	/* Data Byte 06: LSB Line No of End of Top Bar */
	aviInfoFrame[8]  = hdmi_msm_avi_iframe_lut[5][mode];
	/* Data Byte 07: MSB Line No of End of Top Bar */
	aviInfoFrame[9]  = hdmi_msm_avi_iframe_lut[6][mode];
	/* Data Byte 08: LSB Line No of Start of Bottom Bar */
	aviInfoFrame[10] = hdmi_msm_avi_iframe_lut[7][mode];
	/* Data Byte 09: MSB Line No of Start of Bottom Bar */
	aviInfoFrame[11] = hdmi_msm_avi_iframe_lut[8][mode];
	/* Data Byte 10: LSB Pixel Number of End of Left Bar */
	aviInfoFrame[12] = hdmi_msm_avi_iframe_lut[9][mode];
	/* Data Byte 11: MSB Pixel Number of End of Left Bar */
	aviInfoFrame[13] = hdmi_msm_avi_iframe_lut[10][mode];
	/* Data Byte 12: LSB Pixel Number of Start of Right Bar */
	aviInfoFrame[14] = hdmi_msm_avi_iframe_lut[11][mode];
	/* Data Byte 13: MSB Pixel Number of Start of Right Bar */
	aviInfoFrame[15] = hdmi_msm_avi_iframe_lut[12][mode];

	/* Checksum: two's complement of the byte sum, per CEA-861. */
	sum = 0;
	for (i = 0; i < 16; i++)
		sum += aviInfoFrame[i];
	sum &= 0xFF;
	sum = 256 - sum;
	checksum = (uint8) sum;

	/* Pack bytes little-endian into the four AVI_INFO registers;
	 * the checksum replaces byte 0 in the register image. */
	regVal = aviInfoFrame[5];
	regVal = regVal << 8 | aviInfoFrame[4];
	regVal = regVal << 8 | aviInfoFrame[3];
	regVal = regVal << 8 | checksum;
	HDMI_OUTP(0x006C, regVal);

	regVal = aviInfoFrame[9];
	regVal = regVal << 8 | aviInfoFrame[8];
	regVal = regVal << 8 | aviInfoFrame[7];
	regVal = regVal << 8 | aviInfoFrame[6];
	HDMI_OUTP(0x0070, regVal);

	regVal = aviInfoFrame[13];
	regVal = regVal << 8 | aviInfoFrame[12];
	regVal = regVal << 8 | aviInfoFrame[11];
	regVal = regVal << 8 | aviInfoFrame[10];
	HDMI_OUTP(0x0074, regVal);

	regVal = aviInfoFrame[1];
	regVal = regVal << 16 | aviInfoFrame[15];
	regVal = regVal << 8 | aviInfoFrame[14];
	HDMI_OUTP(0x0078, regVal);

	/* INFOFRAME_CTRL0[0x002C] */
	/* 0x3 for AVI InfFrame enable (every frame) */
	HDMI_OUTP(0x002C, HDMI_INP(0x002C) | 0x00000003L);
}
#ifdef CONFIG_FB_MSM_HDMI_3D
/*
 * Build the HDMI Vendor-Specific InfoFrame advertising the current 3D
 * format (side-by-side half or top-and-bottom half) in the Generic0
 * packet registers and enable its per-frame transmission.  When no 3D
 * format is active, Generic0 transmission is disabled instead.
 */
static void hdmi_msm_vendor_infoframe_packetsetup(void)
{
	uint32 packet_header      = 0;
	uint32 check_sum          = 0;
	uint32 packet_payload     = 0;

	if (!external_common_state->format_3d) {
		HDMI_OUTP(0x0034, 0);
		return;
	}

	/* 0x0084 GENERIC0_HDR
	 *   HB0             7:0  NUM
	 *   HB1            15:8  NUM
	 *   HB2           23:16  NUM */
	/* Setup Packet header and payload */
	/* 0x81 VS_INFO_FRAME_ID
	   0x01 VS_INFO_FRAME_VERSION
	   0x1B VS_INFO_FRAME_PAYLOAD_LENGTH */
	packet_header  = 0x81 | (0x01 << 8) | (0x1B << 16);
	HDMI_OUTP(0x0084, packet_header);

	/* Running byte-sum for the packet checksum. */
	check_sum += packet_header & 0xff;
	check_sum += (packet_header >> 8) & 0xff;
	check_sum += (packet_header >> 16) & 0xff;

	/* 0x008C GENERIC0_1
	 *   BYTE4           7:0  NUM
	 *   BYTE5          15:8  NUM
	 *   BYTE6         23:16  NUM
	 *   BYTE7         31:24  NUM */
	/* 0x02 VS_INFO_FRAME_3D_PRESENT */
	packet_payload  = 0x02 << 5;
	switch (external_common_state->format_3d) {
	case 1:
		/* 0b1000 VIDEO_3D_FORMAT_SIDE_BY_SIDE_HALF */
		packet_payload |= (0x08 << 8) << 4;
		break;
	case 2:
		/* 0b0110 VIDEO_3D_FORMAT_TOP_AND_BOTTOM_HALF */
		packet_payload |= (0x06 << 8) << 4;
		break;
	}
	HDMI_OUTP(0x008C, packet_payload);

	check_sum += packet_payload & 0xff;
	check_sum += (packet_payload >> 8) & 0xff;

#define IEEE_REGISTRATION_ID	0xC03
	/* Next 3 bytes are IEEE Registration Identifcation */
	/* 0x0088 GENERIC0_0
	 *   BYTE0           7:0  NUM (checksum)
	 *   BYTE1          15:8  NUM
	 *   BYTE2         23:16  NUM
	 *   BYTE3         31:24  NUM */
	check_sum += IEEE_REGISTRATION_ID & 0xff;
	check_sum += (IEEE_REGISTRATION_ID >> 8) & 0xff;
	check_sum += (IEEE_REGISTRATION_ID >> 16) & 0xff;

	HDMI_OUTP(0x0088, (0x100 - (0xff & check_sum))
		| ((IEEE_REGISTRATION_ID & 0xff) << 8)
		| (((IEEE_REGISTRATION_ID >> 8) & 0xff) << 16)
		| (((IEEE_REGISTRATION_ID >> 16) & 0xff) << 24));

	/* 0x0034 GEN_PKT_CTRL
	 *   GENERIC0_SEND   0      0 = Disable Generic0 Packet Transmission
	 *                          1 = Enable Generic0 Packet Transmission
	 *   GENERIC0_CONT   1      0 = Send Generic0 Packet on next frame only
	 *                          1 = Send Generic0 Packet on every frame
	 *   GENERIC0_UPDATE 2      NUM
	 *   GENERIC1_SEND   4      0 = Disable Generic1 Packet Transmission
	 *                          1 = Enable Generic1 Packet Transmission
	 *   GENERIC1_CONT   5      0 = Send Generic1 Packet on next frame only
	 *                          1 = Send Generic1 Packet on every frame
	 *   GENERIC0_LINE   21:16  NUM
	 *   GENERIC1_LINE   29:24  NUM
	 */
	/* GENERIC0_LINE | GENERIC0_UPDATE | GENERIC0_CONT | GENERIC0_SEND
	 * Setup HDMI TX generic packet control
	 * Enable this packet to transmit every frame
	 * Enable this packet to transmit every frame
	 * Enable HDMI TX engine to transmit Generic packet 0 */
	HDMI_OUTP(0x0034, (1 << 16) | (1 << 2) | BIT(1) | BIT(0));
}
/*
 * Re-program the vendor (3D) InfoFrame if a sink is attached.
 * NOTE(review): the @on parameter is currently unused — the new 3D state
 * is presumably taken from external_common_state->format_3d set by the
 * caller; confirm before relying on @on.
 */
static void hdmi_msm_switch_3d(boolean on)
{
	mutex_lock(&external_common_state_hpd_mutex);
	if (external_common_state->hpd_state)
		hdmi_msm_vendor_infoframe_packetsetup();
	mutex_unlock(&external_common_state_hpd_mutex);
}
#endif
/*
 * Sum the four bytes of a 32-bit word, for InfoFrame checksum
 * accumulation.  The argument is fully parenthesized so expression
 * arguments expand correctly (the previous definition used bare `d`,
 * which mis-binds for arguments like `a + b`); all current call sites
 * pass simple identifiers, so behavior there is unchanged.
 */
#define IFRAME_CHECKSUM_32(d) \
	(((d) & 0xff) + (((d) >> 8) & 0xff) + \
	(((d) >> 16) & 0xff) + (((d) >> 24) & 0xff))
/*
 * Build the Source Product Description (SPD) InfoFrame from the vendor
 * name and product description strings, pack it into the Generic1 packet
 * registers (7-bit ASCII per byte, checksum in byte 0), and enable its
 * per-frame transmission.
 */
static void hdmi_msm_spd_infoframe_packetsetup(void)
{
	uint32 packet_header  = 0;
	uint32 check_sum      = 0;
	uint32 packet_payload = 0;
	uint32 packet_control = 0;

	/* NOTE(review): assumes spd_vendor_name has >= 8 chars and
	 * spd_product_description >= 16 chars available — confirm the
	 * buffers in external_common_state are sized accordingly. */
	uint8 *vendor_name = external_common_state->spd_vendor_name;
	uint8 *product_description =
		external_common_state->spd_product_description;

	/* 0x00A4 GENERIC1_HDR
	 *   HB0             7:0  NUM
	 *   HB1            15:8  NUM
	 *   HB2           23:16  NUM */
	/* Setup Packet header and payload */
	/* 0x83 InfoFrame Type Code
	   0x01 InfoFrame Version Number
	   0x19 Length of Source Product Description InfoFrame
	*/
	packet_header  = 0x83 | (0x01 << 8) | (0x19 << 16);
	HDMI_OUTP(0x00A4, packet_header);
	check_sum += IFRAME_CHECKSUM_32(packet_header);

	/* 0x00AC GENERIC1_1
	 *   BYTE4           7:0  VENDOR_NAME[3]
	 *   BYTE5          15:8  VENDOR_NAME[4]
	 *   BYTE6         23:16  VENDOR_NAME[5]
	 *   BYTE7         31:24  VENDOR_NAME[6] */
	packet_payload = (vendor_name[3] & 0x7f)
		| ((vendor_name[4] & 0x7f) << 8)
		| ((vendor_name[5] & 0x7f) << 16)
		| ((vendor_name[6] & 0x7f) << 24);
	HDMI_OUTP(0x00AC, packet_payload);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);

	/* Product Description (7-bit ASCII code) */
	/* 0x00B0 GENERIC1_2
	 *   BYTE8           7:0  VENDOR_NAME[7]
	 *   BYTE9          15:8  PRODUCT_NAME[ 0]
	 *   BYTE10        23:16  PRODUCT_NAME[ 1]
	 *   BYTE11        31:24  PRODUCT_NAME[ 2] */
	packet_payload = (vendor_name[7] & 0x7f)
		| ((product_description[0] & 0x7f) << 8)
		| ((product_description[1] & 0x7f) << 16)
		| ((product_description[2] & 0x7f) << 24);
	HDMI_OUTP(0x00B0, packet_payload);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);

	/* 0x00B4 GENERIC1_3
	 *   BYTE12          7:0  PRODUCT_NAME[ 3]
	 *   BYTE13         15:8  PRODUCT_NAME[ 4]
	 *   BYTE14        23:16  PRODUCT_NAME[ 5]
	 *   BYTE15        31:24  PRODUCT_NAME[ 6] */
	packet_payload = (product_description[3] & 0x7f)
		| ((product_description[4] & 0x7f) << 8)
		| ((product_description[5] & 0x7f) << 16)
		| ((product_description[6] & 0x7f) << 24);
	HDMI_OUTP(0x00B4, packet_payload);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);

	/* 0x00B8 GENERIC1_4
	 *   BYTE16          7:0  PRODUCT_NAME[ 7]
	 *   BYTE17         15:8  PRODUCT_NAME[ 8]
	 *   BYTE18        23:16  PRODUCT_NAME[ 9]
	 *   BYTE19        31:24  PRODUCT_NAME[10] */
	packet_payload = (product_description[7] & 0x7f)
		| ((product_description[8] & 0x7f) << 8)
		| ((product_description[9] & 0x7f) << 16)
		| ((product_description[10] & 0x7f) << 24);
	HDMI_OUTP(0x00B8, packet_payload);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);

	/* 0x00BC GENERIC1_5
	 *   BYTE20          7:0  PRODUCT_NAME[11]
	 *   BYTE21         15:8  PRODUCT_NAME[12]
	 *   BYTE22        23:16  PRODUCT_NAME[13]
	 *   BYTE23        31:24  PRODUCT_NAME[14] */
	packet_payload = (product_description[11] & 0x7f)
		| ((product_description[12] & 0x7f) << 8)
		| ((product_description[13] & 0x7f) << 16)
		| ((product_description[14] & 0x7f) << 24);
	HDMI_OUTP(0x00BC, packet_payload);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);

	/* 0x00C0 GENERIC1_6
	 *   BYTE24          7:0  PRODUCT_NAME[15]
	 *   BYTE25         15:8  Source Device Information
	 *   BYTE26        23:16  NUM
	 *   BYTE27        31:24  NUM */
	/* Source Device Information
	 * 00h unknown
	 * 01h Digital STB
	 * 02h DVD
	 * 03h D-VHS
	 * 04h HDD Video
	 * 05h DVC
	 * 06h DSC
	 * 07h Video CD
	 * 08h Game
	 * 09h PC general */
	packet_payload = (product_description[15] & 0x7f) | 0x00 << 8;
	HDMI_OUTP(0x00C0, packet_payload);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);

	/* Vendor Name (7bit ASCII code) */
	/* 0x00A8 GENERIC1_0
	 *   BYTE0           7:0  CheckSum
	 *   BYTE1          15:8  VENDOR_NAME[0]
	 *   BYTE2         23:16  VENDOR_NAME[1]
	 *   BYTE3         31:24  VENDOR_NAME[2] */
	packet_payload = ((vendor_name[0] & 0x7f) << 8)
		| ((vendor_name[1] & 0x7f) << 16)
		| ((vendor_name[2] & 0x7f) << 24);
	check_sum += IFRAME_CHECKSUM_32(packet_payload);
	/* Checksum byte goes last: two's complement of the running sum. */
	packet_payload |= ((0x100 - (0xff & check_sum)) & 0xff);
	HDMI_OUTP(0x00A8, packet_payload);

	/* GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
	 * Setup HDMI TX generic packet control
	 * Enable this packet to transmit every frame
	 * Enable HDMI TX engine to transmit Generic packet 1 */
	packet_control = HDMI_INP_ND(0x0034);
	packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
	HDMI_OUTP(0x0034, packet_control);
}
/*
 * Enable or disable the three HDMI core clocks (app clk, master pclk,
 * slave pclk).  Returns 0 on success or the clk_prepare_enable() error.
 *
 * Fix: if enabling the second or third clock fails, the previously
 * enabled clocks are now disabled again instead of being leaked with an
 * unbalanced prepare/enable count.
 */
int hdmi_msm_clk(int on)
{
	int rc;

	DEV_DBG("HDMI Clk: %s\n", on ? "Enable" : "Disable");
	if (on) {
		rc = clk_prepare_enable(hdmi_msm_state->hdmi_app_clk);
		if (rc) {
			DEV_ERR("'hdmi_app_clk' clock enable failed, rc=%d\n",
				rc);
			return rc;
		}

		rc = clk_prepare_enable(hdmi_msm_state->hdmi_m_pclk);
		if (rc) {
			DEV_ERR("'hdmi_m_pclk' clock enable failed, rc=%d\n",
				rc);
			goto err_disable_app_clk;
		}

		rc = clk_prepare_enable(hdmi_msm_state->hdmi_s_pclk);
		if (rc) {
			DEV_ERR("'hdmi_s_pclk' clock enable failed, rc=%d\n",
				rc);
			goto err_disable_m_pclk;
		}
	} else {
		clk_disable_unprepare(hdmi_msm_state->hdmi_app_clk);
		clk_disable_unprepare(hdmi_msm_state->hdmi_m_pclk);
		clk_disable_unprepare(hdmi_msm_state->hdmi_s_pclk);
	}

	return 0;

err_disable_m_pclk:
	clk_disable_unprepare(hdmi_msm_state->hdmi_m_pclk);
err_disable_app_clk:
	clk_disable_unprepare(hdmi_msm_state->hdmi_app_clk);
	return rc;
}
/*
 * Full HDMI core (re)initialization: wait for QDSP to release the audio
 * engine, reset the core and PHY, program video timing, audio, and all
 * InfoFrames, then re-run HDCP re-authentication and CEC setup if they
 * were active.
 */
static void hdmi_msm_turn_on(void)
{
	uint32 audio_pkt_ctrl, audio_cfg;
	/*
	 * Number of wait iterations for QDSP to disable Audio Engine
	 * before resetting HDMI core
	 */
	int i = 10;
	audio_pkt_ctrl = HDMI_INP_ND(0x0020);
	audio_cfg = HDMI_INP_ND(0x01D0);

	/*
	 * Checking BIT[0] of AUDIO PACKET CONTROL and
	 * AUDIO CONFIGURATION register
	 */
	while (((audio_pkt_ctrl & 0x00000001) || (audio_cfg & 0x00000001))
		&& (i--)) {
		audio_pkt_ctrl = HDMI_INP_ND(0x0020);
		audio_cfg = HDMI_INP_ND(0x01D0);
		DEV_DBG("%d times :: HDMI AUDIO PACKET is %08x and "
		"AUDIO CFG is %08x", i, audio_pkt_ctrl, audio_cfg);
		msleep(20);
	}

	/* Core must be out of HDMI mode while it is being reset. */
	hdmi_msm_set_mode(FALSE);
	mutex_lock(&hdcp_auth_state_mutex);
	hdmi_msm_reset_core();
	mutex_unlock(&hdcp_auth_state_mutex);

	hdmi_msm_init_phy(external_common_state->video_resolution);
	/* HDMI_USEC_REFTIMER[0x0208] */
	HDMI_OUTP(0x0208, 0x0001001B);

	hdmi_msm_set_mode(TRUE);

	hdmi_msm_video_setup(external_common_state->video_resolution);
	if (!hdmi_msm_is_dvi_mode()) {
		hdmi_msm_audio_setup();

		/*
		 * Send the audio switch device notification if HDCP is
		 * not enabled. Otherwise, the notification would be
		 * sent after HDCP authentication is successful.
		 */
		if (!hdmi_msm_state->hdcp_enable)
			SWITCH_SET_HDMI_AUDIO(1, 0);
	}
	hdmi_msm_avi_info_frame();
#ifdef CONFIG_FB_MSM_HDMI_3D
	hdmi_msm_vendor_infoframe_packetsetup();
#endif
	hdmi_msm_spd_infoframe_packetsetup();

	if (hdmi_msm_state->hdcp_enable && hdmi_msm_state->reauth) {
		hdmi_msm_hdcp_enable();
		hdmi_msm_state->reauth = FALSE ;
	}

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
	/* re-initialize CEC if enabled */
	mutex_lock(&hdmi_msm_state_mutex);
	if (hdmi_msm_state->cec_enabled == true) {
		hdmi_msm_cec_init();
		hdmi_msm_cec_write_logical_addr(
			hdmi_msm_state->cec_logical_addr);
	}
	mutex_unlock(&hdmi_msm_state_mutex);
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */

	DEV_INFO("HDMI Core: Initialized\n");
}
/*
 * Timer callback kicking off deferred HDCP work.
 * Runs in timer (softirq) context, so the actual work is pushed onto the
 * HDMI workqueue; nothing is queued when HDCP is disabled.
 */
static void hdmi_msm_hdcp_timer(unsigned long data)
{
        if (hdmi_msm_state->hdcp_enable)
                queue_work(hdmi_work_queue, &hdmi_msm_state->hdcp_work);
        else
                DEV_DBG("%s: HDCP not enabled\n", __func__);
}
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
/*
 * Timer callback for CEC line-latch detection: defers the handling to the
 * HDMI workqueue since timer context cannot sleep.
 */
static void hdmi_msm_cec_read_timer_func(unsigned long data)
{
        queue_work(hdmi_work_queue, &hdmi_msm_state->cec_latch_detect_work);
}
#endif
/*
 * hdmi_msm_hpd_polarity_setup() - arm the HPD interrupt for the *opposite*
 * of the current cable state, so the next plug/unplug raises an interrupt.
 *
 * If the cable sense already matches the polarity we just programmed, the
 * transition has been missed; toggle the HPD circuit (BIT(28) of the HPD
 * control register at 0x0258) to force the hardware to re-evaluate and
 * fire the interrupt.
 *
 * NOTE(review): 0x0254 appears to be the HPD interrupt control register
 * and 0x0250 its status register (see hdmi_msm_hpd_off()); register names
 * should be confirmed against the MSM HDMI register map.
 */
static void hdmi_msm_hpd_polarity_setup(void)
{
        u32 cable_sense;
        /* Listen for the opposite of the current state. */
        bool polarity = !external_common_state->hpd_state;
        bool trigger = false;

        if (polarity)
                HDMI_OUTP(0x0254, BIT(2) | BIT(1));
        else
                HDMI_OUTP(0x0254, BIT(2));

        /* Current cable sense level from the HPD status register. */
        cable_sense = (HDMI_INP(0x0250) & BIT(1)) >> 1;

        /* Sense already at the level we are listening for => missed edge. */
        if (cable_sense == polarity)
                trigger = true;

        DEV_DBG("%s: listen=%s, sense=%s, trigger=%s\n", __func__,
                polarity ? "connect" : "disconnect",
                cable_sense ? "connect" : "disconnect",
                trigger ? "Yes" : "No");

        if (trigger) {
                u32 reg_val = HDMI_INP(0x0258);

                /* Toggle HPD circuit to trigger HPD sense */
                HDMI_OUTP(0x0258, reg_val & ~BIT(28));
                HDMI_OUTP(0x0258, reg_val | BIT(28));
        }
}
/*
 * hdmi_msm_hpd_off() - tear down Hot Plug Detect: disable the IRQ and the
 * HPD interrupt, drop the 5V supply, clocks and GPIOs.
 *
 * No-op when HPD was never initialized; the reverse of hdmi_msm_hpd_on().
 * Fix over the original: the debug message said "HPD_CTRLd" (typo).
 */
static void hdmi_msm_hpd_off(void)
{
        int rc = 0;

        if (!hdmi_msm_state->hpd_initialized) {
                DEV_DBG("%s: HPD is already OFF, returning\n", __func__);
                return;
        }

        DEV_DBG("%s: (timer, 5V, IRQ off)\n", __func__);
        disable_irq(hdmi_msm_state->irq);

        /* Disable HPD interrupt */
        HDMI_OUTP(0x0254, 0);
        DEV_DBG("%s: Disabling HPD_CTRL\n", __func__);

        hdmi_msm_set_mode(FALSE);
        hdmi_msm_state->pd->enable_5v(0);
        hdmi_msm_clk(0);

        /* GPIO teardown failure is logged but not fatal on the off path. */
        rc = hdmi_msm_state->pd->gpio_config(0);
        if (rc != 0)
                DEV_INFO("%s: Failed to disable GPIOs. Error=%d\n",
                        __func__, rc);

        hdmi_msm_state->hpd_initialized = FALSE;
}
/*
 * hdmi_msm_dump_regs() - hex-dump the HDMI register window (0x334 bytes at
 * MSM_HDMI_BASE) to the kernel log, each line tagged with @prefix.
 * Compiled to a no-op unless REG_DUMP is defined.
 */
static void hdmi_msm_dump_regs(const char *prefix)
{
#ifdef REG_DUMP
        print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
                (void *)MSM_HDMI_BASE, 0x0334, false);
#endif
}
/*
 * hdmi_msm_hpd_on() - power up Hot Plug Detect: GPIOs, clocks, 5V supply,
 * one-time PHY reset, HPD hardware circuit and the interrupt line.
 *
 * Idempotent: returns 0 immediately if HPD is already initialized.
 * On failure the resources acquired so far are released via the
 * goto-cleanup chain (error3 -> error2 -> error1).
 *
 * Returns 0 on success or a negative errno.
 */
static int hdmi_msm_hpd_on(void)
{
        static int phy_reset_done;      /* PHY is reset only once per boot */
        uint32 hpd_ctrl;
        int rc = 0;

        if (hdmi_msm_state->hpd_initialized) {
                DEV_DBG("%s: HPD is already ON\n", __func__);
        } else {
                rc = hdmi_msm_state->pd->gpio_config(1);
                if (rc) {
                        DEV_ERR("%s: Failed to enable GPIOs. Error=%d\n",
                                __func__, rc);
                        goto error1;
                }

                rc = hdmi_msm_clk(1);
                if (rc) {
                        DEV_ERR("%s: Failed to enable clocks. Error=%d\n",
                                __func__, rc);
                        goto error2;
                }

                rc = hdmi_msm_state->pd->enable_5v(1);
                if (rc) {
                        DEV_ERR("%s: Failed to enable 5V regulator. Error=%d\n",
                                __func__, rc);
                        goto error3;
                }
                hdmi_msm_dump_regs("HDMI-INIT: ");

                hdmi_msm_set_mode(FALSE);
                if (!phy_reset_done) {
                        hdmi_phy_reset();
                        phy_reset_done = 1;
                }
                hdmi_msm_set_mode(TRUE);

                /* HDMI_USEC_REFTIMER[0x0208] */
                HDMI_OUTP(0x0208, 0x0001001B);

                /* Set up HPD state variables */
                mutex_lock(&external_common_state_hpd_mutex);
                external_common_state->hpd_state = 0;
                mutex_unlock(&external_common_state_hpd_mutex);
                /*
                 * NOTE(review): this lock/unlock pair protects nothing and
                 * looks like a leftover from removed code — confirm before
                 * removing (it may have been kept as a synchronization
                 * barrier with workqueue code).
                 */
                mutex_lock(&hdmi_msm_state_mutex);
                mutex_unlock(&hdmi_msm_state_mutex);

                enable_irq(hdmi_msm_state->irq);

                hdmi_msm_state->hpd_initialized = TRUE;

                /* set timeout to 4.1ms (max) for hardware debounce */
                hpd_ctrl = HDMI_INP(0x0258) | 0x1FFF;

                /* Turn on HPD HW circuit */
                HDMI_OUTP(0x0258, hpd_ctrl | BIT(28));

                /* Set HPD cable sense polarity */
                hdmi_msm_hpd_polarity_setup();
        }

        DEV_DBG("%s: (IRQ, 5V on)\n", __func__);
        return 0;

error3:
        hdmi_msm_clk(0);
error2:
        hdmi_msm_state->pd->gpio_config(0);
error1:
        return rc;
}
/*
 * hdmi_msm_power_ctrl() - framebuffer power-control hook (registered in
 * hdmi_msm_panel_data): turns the HPD circuitry on or off.
 *
 * @enable: TRUE on display resume, FALSE on suspend.
 *
 * HPD is only enabled when HDMI is the primary display or the user-space
 * HPD feature switch is on.  Across suspend/resume the HPD state is saved
 * in pre_suspend_hpd_state so a cable removed while suspended generates an
 * OFFLINE event on resume (vendor patch, see inline markers).
 *
 * Returns 0 on success or the error from hdmi_msm_hpd_on().
 */
static int hdmi_msm_power_ctrl(boolean enable)
{
        int rc = 0;

        if (enable) {
                /*
                 * Enable HPD only if the UI option is on or if
                 * HDMI is configured as the primary display
                 */
                if (hdmi_prim_display ||
                        external_common_state->hpd_feature_on) {
                        DEV_DBG("%s: Turning HPD ciruitry on\n", __func__);

                        /*[ECID:000000] ZTEBSP wanghaifei start 20130221, add qcom new patch for HDP resume wait*/
                        if (external_common_state->pre_suspend_hpd_state) {
                                external_common_state->pre_suspend_hpd_state =
                                        false;

                                hdmi_msm_send_event(HPD_EVENT_OFFLINE);
                        }
                        /*[ECID:000000] ZTEBSP wanghaifei end 20130221, add qcom new patch for HDP resume wait*/

                        rc = hdmi_msm_hpd_on();
                        if (rc) {
                                DEV_ERR("%s: HPD ON FAILED\n", __func__);
                                return rc;
                        }

                        /*[ECID:000000] ZTEBSP wanghaifei start 20130221, add qcom new patch for HDP resume wait*/
#if 0
                        /* Wait for HPD initialization to complete */
                        INIT_COMPLETION(hdmi_msm_state->hpd_event_processed);
                        time = wait_for_completion_interruptible_timeout(
                                &hdmi_msm_state->hpd_event_processed, HZ);

                        if (!time && !external_common_state->hpd_state) {
                                DEV_DBG("%s: cable not detected\n", __func__);
                                queue_work(hdmi_work_queue,
                                        &hdmi_msm_state->hpd_state_work);
                        }
#endif
                        /*[ECID:000000] ZTEBSP wanghaifei end 20130221, add qcom new patch for HDP resume wait*/
                }
        } else {
                DEV_DBG("%s: Turning HPD ciruitry off\n", __func__);

                /*[ECID:000000] ZTEBSP wanghaifei start 20130221, add qcom new patch for HDP resume wait*/
                external_common_state->pre_suspend_hpd_state =
                        external_common_state->hpd_state;
                /*[ECID:000000] ZTEBSP wanghaifei end 20130221, add qcom new patch for HDP resume wait*/

                hdmi_msm_hpd_off();
        }

        return rc;
}
/*
 * hdmi_msm_power_on() - framebuffer "panel on" hook: start HDMI
 * transmission if a cable is connected and the resolution is supported,
 * then kick off HDCP authentication when enabled.
 *
 * @pdev: the hdmi_msm platform device; its drvdata is the msm_fb context.
 *
 * Always re-arms the HPD sense polarity before returning, so a later
 * plug/unplug is still detected even when this call bails out early.
 *
 * Returns 0 (errors are logged, not propagated, so the fb layer keeps
 * running).
 */
static int hdmi_msm_power_on(struct platform_device *pdev)
{
        struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
        int ret = 0;
        bool changed;

        if (!hdmi_ready()) {
                DEV_ERR("%s: HDMI/HPD not initialized\n", __func__);
                return ret;
        }

        if (!external_common_state->hpd_state) {
                DEV_DBG("%s:HDMI cable not connected\n", __func__);
                goto error;
        }

        /* Only start transmission with supported resolution */
        changed = hdmi_common_get_video_format_from_drv_data(mfd);
        if (changed || external_common_state->default_res_supported) {
                mutex_lock(&external_common_state_hpd_mutex);
                /* Re-check the cable under the lock before powering on. */
                if (external_common_state->hpd_state &&
                        hdmi_msm_is_power_on()) {
                        mutex_unlock(&external_common_state_hpd_mutex);

                        DEV_INFO("HDMI cable connected %s(%dx%d, %d)\n",
                                __func__, mfd->var_xres, mfd->var_yres,
                                mfd->var_pixclock);

                        hdmi_msm_turn_on();
                        hdmi_msm_state->panel_power_on = TRUE;

                        if (hdmi_msm_state->hdcp_enable) {
                                /* Kick off HDCP Authentication */
                                mutex_lock(&hdcp_auth_state_mutex);
                                hdmi_msm_state->reauth = FALSE;
                                hdmi_msm_state->full_auth_done = FALSE;
                                mutex_unlock(&hdcp_auth_state_mutex);
                                mod_timer(&hdmi_msm_state->hdcp_timer,
                                        jiffies + HZ/2);
                        }
                } else {
                        mutex_unlock(&external_common_state_hpd_mutex);
                }

                hdmi_msm_dump_regs("HDMI-ON: ");

                DEV_INFO("power=%s DVI= %s\n",
                        hdmi_msm_is_power_on() ? "ON" : "OFF" ,
                        hdmi_msm_is_dvi_mode() ? "ON" : "OFF");
        } else {
                DEV_ERR("%s: Video fmt %d not supp. Returning\n",
                        __func__,
                        external_common_state->video_resolution);
        }

error:
        /* Set HPD cable sense polarity */
        hdmi_msm_hpd_polarity_setup();

        return ret;
}
/*
 * mhl_connect_api() - simulate an HPD connect/disconnect based on an MHL
 * cable event (exported for the MHL driver).
 * @on: TRUE for connect, FALSE for disconnect.
 *
 * On connect: reads the EDID, powers the core on and sends the ONLINE
 * uevent.  With HDCP disabled an HDCP_STATE=PASS uevent is sent right away
 * so audio can start (HDMI compliance); otherwise HDCP authentication is
 * started and the event follows on success.
 */
void mhl_connect_api(boolean on)
{
        char *envp[2];

        /* Simulating a HPD event based on MHL event */
        if (on) {
                hdmi_msm_read_edid();
                hdmi_msm_state->reauth = FALSE;
                /* Build EDID table */
                hdmi_msm_turn_on();
                DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
                kobject_uevent(external_common_state->uevent_kobj,
                        KOBJ_ONLINE);
                envp[0] = 0;
                if (!hdmi_msm_state->hdcp_enable) {
                        /* Send Audio for HDMI Compliance Cases*/
                        envp[0] = "HDCP_STATE=PASS";
                        envp[1] = NULL;
                        DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
                        kobject_uevent_env(external_common_state->uevent_kobj,
                                KOBJ_CHANGE, envp);
                        switch_set_state(&external_common_state->sdev, 1);
                        DEV_INFO("%s: hdmi state switched to %d\n",
                                __func__, external_common_state->sdev.state);
                } else {
                        hdmi_msm_hdcp_enable();
                }
        } else {
                DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
                kobject_uevent(external_common_state->uevent_kobj,
                        KOBJ_OFFLINE);
                switch_set_state(&external_common_state->sdev, 0);
                DEV_INFO("%s: hdmi state switched to %d\n", __func__,
                        external_common_state->sdev.state);
        }
}
EXPORT_SYMBOL(mhl_connect_api);
/* Note that power-off will also be called when the cable-remove event is
* processed on the user-space and as a result the framebuffer is powered
* down. However, we are still required to be able to detect a cable-insert
* event; so for now leave the HDMI engine running; so that the HPD IRQ is
* still being processed.
*/
/*
 * hdmi_msm_power_off() - framebuffer "panel off" hook: stop HDCP, audio
 * and the PHY.  The HDMI engine itself stays running so the HPD IRQ can
 * still detect a cable insert (see the comment block above).
 *
 * Always re-arms the HPD sense polarity before returning.
 * Returns 0 (errors are logged only).
 */
static int hdmi_msm_power_off(struct platform_device *pdev)
{
        int ret = 0;

        if (!hdmi_ready()) {
                DEV_ERR("%s: HDMI/HPD not initialized\n", __func__);
                return ret;
        }

        if (!hdmi_msm_state->panel_power_on) {
                DEV_DBG("%s: panel not ON\n", __func__);
                goto error;
        }

        if (hdmi_msm_state->hdcp_enable) {
                if (hdmi_msm_state->hdcp_activating) {
                        /*
                         * Let the HDCP work know that we got an HPD
                         * disconnect so that it can stop the
                         * reauthentication loop.
                         */
                        mutex_lock(&hdcp_auth_state_mutex);
                        hdmi_msm_state->hpd_during_auth = TRUE;
                        mutex_unlock(&hdcp_auth_state_mutex);
                }

                /*
                 * Cancel any pending reauth attempts.
                 * If one is ongoing, wait for it to finish
                 */
                cancel_work_sync(&hdmi_msm_state->hdcp_reauth_work);
                cancel_work_sync(&hdmi_msm_state->hdcp_work);
                del_timer_sync(&hdmi_msm_state->hdcp_timer);

                hdcp_deauthenticate();
        }

        SWITCH_SET_HDMI_AUDIO(0, 0);

        if (!hdmi_msm_is_dvi_mode())
                hdmi_msm_audio_off();

        hdmi_msm_powerdown_phy();

        hdmi_msm_state->panel_power_on = FALSE;
        DEV_INFO("power: OFF (audio off)\n");

        /* Unblock anyone waiting on the HPD event (e.g. hpd_feature off). */
        if (!completion_done(&hdmi_msm_state->hpd_event_processed))
                complete(&hdmi_msm_state->hpd_event_processed);

error:
        /* Set HPD cable sense polarity */
        hdmi_msm_hpd_polarity_setup();

        return ret;
}
void hdmi_msm_config_hdcp_feature(void)
{
if (hdcp_feature_on && hdmi_msm_has_hdcp()) {
init_timer(&hdmi_msm_state->hdcp_timer);
hdmi_msm_state->hdcp_timer.function = hdmi_msm_hdcp_timer;
hdmi_msm_state->hdcp_timer.data = (uint32)NULL;
hdmi_msm_state->hdcp_timer.expires = 0xffffffffL;
init_completion(&hdmi_msm_state->hdcp_success_done);
INIT_WORK(&hdmi_msm_state->hdcp_reauth_work,
hdmi_msm_hdcp_reauth_work);
INIT_WORK(&hdmi_msm_state->hdcp_work, hdmi_msm_hdcp_work);
hdmi_msm_state->hdcp_enable = TRUE;
} else {
del_timer(&hdmi_msm_state->hdcp_timer);
hdmi_msm_state->hdcp_enable = FALSE;
}
external_common_state->present_hdcp = hdmi_msm_state->hdcp_enable;
DEV_INFO("%s: HDCP Feature: %s\n", __func__,
hdmi_msm_state->hdcp_enable ? "Enabled" : "Disabled");
}
/*
 * hdmi_msm_probe() - platform probe for the two "hdmi_msm" devices.
 *
 * Device id 0 only carries resources (register windows, IRQ, platform
 * data) and returns after grabbing them.  The second device performs the
 * real initialization: clocks, IRQ handler, framebuffer registration,
 * optional HPD power-up and switch-class nodes.
 *
 * Fixes over the original:
 *  - clk_get() failures now propagate the real error via PTR_ERR()
 *    instead of IS_ERR(), which always returned 1;
 *  - a failed clock handle is NULLed so the error path can never call
 *    clk_put() on an ERR_PTR value.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit hdmi_msm_probe(struct platform_device *pdev)
{
        int rc;
        struct platform_device *fb_dev;

        if (!hdmi_msm_state) {
                pr_err("%s: hdmi_msm_state is NULL\n", __func__);
                return -ENOMEM;
        }

        external_common_state->dev = &pdev->dev;
        DEV_DBG("probe\n");
        if (pdev->id == 0) {
                struct resource *res;

#define GET_RES(name, mode) do { \
        res = platform_get_resource_byname(pdev, mode, name); \
        if (!res) { \
                DEV_ERR("'" name "' resource not found\n"); \
                rc = -ENODEV; \
                goto error; \
        } \
} while (0)

#define IO_REMAP(var, name) do { \
        GET_RES(name, IORESOURCE_MEM); \
        var = ioremap(res->start, resource_size(res)); \
        if (!var) { \
                DEV_ERR("'" name "' ioremap failed\n"); \
                rc = -ENOMEM; \
                goto error; \
        } \
} while (0)

#define GET_IRQ(var, name) do { \
        GET_RES(name, IORESOURCE_IRQ); \
        var = res->start; \
} while (0)

                IO_REMAP(hdmi_msm_state->qfprom_io, "hdmi_msm_qfprom_addr");
                hdmi_msm_state->hdmi_io = MSM_HDMI_BASE;
                GET_IRQ(hdmi_msm_state->irq, "hdmi_msm_irq");

                hdmi_msm_state->pd = pdev->dev.platform_data;

#undef GET_RES
#undef IO_REMAP
#undef GET_IRQ
                return 0;
        }

        hdmi_msm_state->hdmi_app_clk = clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(hdmi_msm_state->hdmi_app_clk)) {
                DEV_ERR("'core_clk' clk not found\n");
                rc = PTR_ERR(hdmi_msm_state->hdmi_app_clk);
                hdmi_msm_state->hdmi_app_clk = NULL;
                goto error;
        }

        hdmi_msm_state->hdmi_m_pclk = clk_get(&pdev->dev, "master_iface_clk");
        if (IS_ERR(hdmi_msm_state->hdmi_m_pclk)) {
                DEV_ERR("'master_iface_clk' clk not found\n");
                rc = PTR_ERR(hdmi_msm_state->hdmi_m_pclk);
                hdmi_msm_state->hdmi_m_pclk = NULL;
                goto error;
        }

        hdmi_msm_state->hdmi_s_pclk = clk_get(&pdev->dev, "slave_iface_clk");
        if (IS_ERR(hdmi_msm_state->hdmi_s_pclk)) {
                DEV_ERR("'slave_iface_clk' clk not found\n");
                rc = PTR_ERR(hdmi_msm_state->hdmi_s_pclk);
                hdmi_msm_state->hdmi_s_pclk = NULL;
                goto error;
        }

        hdmi_msm_state->is_mhl_enabled = hdmi_msm_state->pd->is_mhl_enabled;

        rc = check_hdmi_features();
        if (rc) {
                DEV_ERR("Init FAILED: check_hdmi_features rc=%d\n", rc);
                goto error;
        }

        /* All three platform callbacks below are mandatory. */
        if (!hdmi_msm_state->pd->core_power) {
                DEV_ERR("Init FAILED: core_power function missing\n");
                rc = -ENODEV;
                goto error;
        }
        if (!hdmi_msm_state->pd->enable_5v) {
                DEV_ERR("Init FAILED: enable_5v function missing\n");
                rc = -ENODEV;
                goto error;
        }
        if (!hdmi_msm_state->pd->cec_power) {
                DEV_ERR("Init FAILED: cec_power function missing\n");
                rc = -ENODEV;
                goto error;
        }

        /* IRQ stays disabled until HPD is powered up (hdmi_msm_hpd_on()). */
        rc = request_threaded_irq(hdmi_msm_state->irq, NULL, &hdmi_msm_isr,
                IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "hdmi_msm_isr", NULL);
        if (rc) {
                DEV_ERR("Init FAILED: IRQ request, rc=%d\n", rc);
                goto error;
        }
        disable_irq(hdmi_msm_state->irq);

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
        init_timer(&hdmi_msm_state->cec_read_timer);
        hdmi_msm_state->cec_read_timer.function =
                hdmi_msm_cec_read_timer_func;
        hdmi_msm_state->cec_read_timer.data = (uint32)NULL;
        hdmi_msm_state->cec_read_timer.expires = 0xffffffffL;
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT */

        fb_dev = msm_fb_add_device(pdev);
        if (fb_dev) {
                rc = external_common_state_create(fb_dev);
                if (rc) {
                        DEV_ERR("Init FAILED: hdmi_msm_state_create, rc=%d\n",
                                rc);
                        goto error;
                }
        } else
                DEV_ERR("Init FAILED: failed to add fb device\n");

        if (hdmi_prim_display) {
                rc = hdmi_msm_hpd_on();
                if (rc)
                        goto error;
        }

        hdmi_msm_config_hdcp_feature();

        /* Initialize hdmi node and register with switch driver */
        if (hdmi_prim_display)
                external_common_state->sdev.name = "hdmi_as_primary";
        else
                external_common_state->sdev.name = "hdmi";
        if (switch_dev_register(&external_common_state->sdev) < 0) {
                DEV_ERR("Hdmi switch registration failed\n");
                rc = -ENODEV;
                goto error;
        }

        external_common_state->audio_sdev.name = "hdmi_audio";
        if (switch_dev_register(&external_common_state->audio_sdev) < 0) {
                DEV_ERR("Hdmi audio switch registration failed\n");
                switch_dev_unregister(&external_common_state->sdev);
                rc = -ENODEV;
                goto error;
        }

        return 0;

error:
        /* Release everything this probe may have acquired. */
        if (hdmi_msm_state->qfprom_io)
                iounmap(hdmi_msm_state->qfprom_io);
        hdmi_msm_state->qfprom_io = NULL;

        if (hdmi_msm_state->hdmi_io)
                iounmap(hdmi_msm_state->hdmi_io);
        hdmi_msm_state->hdmi_io = NULL;

        external_common_state_remove();

        if (hdmi_msm_state->hdmi_app_clk)
                clk_put(hdmi_msm_state->hdmi_app_clk);
        if (hdmi_msm_state->hdmi_m_pclk)
                clk_put(hdmi_msm_state->hdmi_m_pclk);
        if (hdmi_msm_state->hdmi_s_pclk)
                clk_put(hdmi_msm_state->hdmi_s_pclk);

        hdmi_msm_state->hdmi_app_clk = NULL;
        hdmi_msm_state->hdmi_m_pclk = NULL;
        hdmi_msm_state->hdmi_s_pclk = NULL;

        return rc;
}
/*
 * hdmi_msm_remove() - platform remove: the mirror of hdmi_msm_probe().
 * Unregisters the switch nodes, powers HPD down, frees the IRQ, unmaps
 * the register windows, drops the clock references and finally frees the
 * driver state.
 */
static int __devexit hdmi_msm_remove(struct platform_device *pdev)
{
        DEV_INFO("HDMI device: remove\n");

        DEV_INFO("HDMI HPD: OFF\n");

        /* Unregister hdmi node from switch driver */
        switch_dev_unregister(&external_common_state->sdev);
        switch_dev_unregister(&external_common_state->audio_sdev);

        hdmi_msm_hpd_off();
        free_irq(hdmi_msm_state->irq, NULL);

        if (hdmi_msm_state->qfprom_io)
                iounmap(hdmi_msm_state->qfprom_io);
        hdmi_msm_state->qfprom_io = NULL;

        if (hdmi_msm_state->hdmi_io)
                iounmap(hdmi_msm_state->hdmi_io);
        hdmi_msm_state->hdmi_io = NULL;

        external_common_state_remove();

        if (hdmi_msm_state->hdmi_app_clk)
                clk_put(hdmi_msm_state->hdmi_app_clk);
        if (hdmi_msm_state->hdmi_m_pclk)
                clk_put(hdmi_msm_state->hdmi_m_pclk);
        if (hdmi_msm_state->hdmi_s_pclk)
                clk_put(hdmi_msm_state->hdmi_s_pclk);

        hdmi_msm_state->hdmi_app_clk = NULL;
        hdmi_msm_state->hdmi_m_pclk = NULL;
        hdmi_msm_state->hdmi_s_pclk = NULL;

        kfree(hdmi_msm_state);
        hdmi_msm_state = NULL;

        return 0;
}
/*
 * hdmi_msm_hpd_feature() - user-space HPD feature switch (registered as
 * external_common_state->hpd_feature).
 * @on: non-zero to enable HPD, zero to disable it.
 *
 * On disable with a cable still connected, an OFFLINE event is sent first
 * and we wait (up to 1s) for user space to process it before powering HPD
 * down and clearing the switch state.
 *
 * Returns 0 or the error from hdmi_msm_hpd_on().
 */
static int hdmi_msm_hpd_feature(int on)
{
        int rc = 0;

        DEV_INFO("%s: %d\n", __func__, on);
        if (on) {
                rc = hdmi_msm_hpd_on();
        } else {
                if (external_common_state->hpd_state) {
                        external_common_state->hpd_state = 0;

                        /* Send offline event to switch OFF HDMI and HAL FD */
                        hdmi_msm_send_event(HPD_EVENT_OFFLINE);

                        /* Wait for HDMI and FD to close */
                        INIT_COMPLETION(hdmi_msm_state->hpd_event_processed);
                        wait_for_completion_interruptible_timeout(
                                &hdmi_msm_state->hpd_event_processed, HZ);
                }

                hdmi_msm_hpd_off();

                /* Set HDMI switch node to 0 on HPD feature disable */
                switch_set_state(&external_common_state->sdev, 0);
                DEV_INFO("%s: hdmi state switched to %d\n", __func__,
                        external_common_state->sdev.state);
        }

        return rc;
}
/* Platform driver matching the "hdmi_msm" devices (probe/remove above). */
static struct platform_driver this_driver = {
        .probe = hdmi_msm_probe,
        .remove = hdmi_msm_remove,
        .driver.name = "hdmi_msm",
};

/* Panel callbacks handed to the MSM framebuffer layer. */
static struct msm_fb_panel_data hdmi_msm_panel_data = {
        .on = hdmi_msm_power_on,
        .off = hdmi_msm_power_off,
        .power_ctrl = hdmi_msm_power_ctrl,
};

/* Second "hdmi_msm" device (id 1) carrying the panel data; id 0 is the
 * resource-only device handled early in hdmi_msm_probe(). */
static struct platform_device this_device = {
        .name = "hdmi_msm",
        .id = 1,
        .dev.platform_data = &hdmi_msm_panel_data,
};
/*
 * hdmi_msm_init() - module init: allocate the driver state, set defaults,
 * create the HDMI workqueue and register the platform driver and device.
 *
 * Fixes over the original:
 *  - create_workqueue() can return NULL; that is now checked;
 *  - the error path now destroys the workqueue and frees the CEC queue,
 *    which were previously leaked.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init hdmi_msm_init(void)
{
        int rc;

        if (msm_fb_detect_client("hdmi_msm"))
                return 0;

#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
        hdmi_prim_display = 1;
#endif

        hdmi_msm_setup_video_mode_lut();
        hdmi_msm_state = kzalloc(sizeof(*hdmi_msm_state), GFP_KERNEL);
        if (!hdmi_msm_state) {
                pr_err("hdmi_msm_init FAILED: out of memory\n");
                rc = -ENOMEM;
                goto init_exit;
        }

        external_common_state = &hdmi_msm_state->common;

        /* Default resolution; HDMI-as-primary may override via boot param. */
        if (hdmi_prim_display && hdmi_prim_resolution)
                external_common_state->video_resolution =
                        hdmi_prim_resolution - 1;
        else
                external_common_state->video_resolution =
                        HDMI_VFRMT_1920x1080p60_16_9;

#ifdef CONFIG_FB_MSM_HDMI_3D
        external_common_state->switch_3d = hdmi_msm_switch_3d;
#endif
        memset(external_common_state->spd_vendor_name, 0,
                        sizeof(external_common_state->spd_vendor_name));
        memset(external_common_state->spd_product_description, 0,
                        sizeof(external_common_state->spd_product_description));

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
        hdmi_msm_state->cec_queue_start =
                kzalloc(sizeof(struct hdmi_msm_cec_msg)*CEC_QUEUE_SIZE,
                        GFP_KERNEL);
        if (!hdmi_msm_state->cec_queue_start) {
                pr_err("hdmi_msm_init FAILED: CEC queue out of memory\n");
                rc = -ENOMEM;
                goto init_exit;
        }

        hdmi_msm_state->cec_queue_wr = hdmi_msm_state->cec_queue_start;
        hdmi_msm_state->cec_queue_rd = hdmi_msm_state->cec_queue_start;
        hdmi_msm_state->cec_queue_full = false;
#endif

        /*
         * Create your work queue
         * allocs and returns ptr -- may be NULL on failure.
         */
        hdmi_work_queue = create_workqueue("hdmi_hdcp");
        if (!hdmi_work_queue) {
                pr_err("hdmi_msm_init FAILED: create_workqueue failed\n");
                rc = -ENOMEM;
                goto init_exit;
        }
        external_common_state->hpd_feature = hdmi_msm_hpd_feature;

        rc = platform_driver_register(&this_driver);
        if (rc) {
                pr_err("hdmi_msm_init FAILED: platform_driver_register rc=%d\n",
                        rc);
                goto init_exit;
        }

        hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info);
        init_completion(&hdmi_msm_state->ddc_sw_done);
        init_completion(&hdmi_msm_state->hpd_event_processed);
        INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work);

#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
        INIT_WORK(&hdmi_msm_state->cec_latch_detect_work,
                hdmi_msm_cec_latch_work);
        init_completion(&hdmi_msm_state->cec_frame_wr_done);
        init_completion(&hdmi_msm_state->cec_line_latch_wait);
#endif

        rc = platform_device_register(&this_device);
        if (rc) {
                pr_err("hdmi_msm_init FAILED: platform_device_register rc=%d\n",
                        rc);
                platform_driver_unregister(&this_driver);
                goto init_exit;
        }

        pr_debug("%s: success:"
#ifdef DEBUG
                " DEBUG"
#else
                " RELEASE"
#endif
                " AUDIO EDID HPD HDCP"
                " DVI"
#ifndef CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT
                ":0"
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT */
                "\n", __func__);

        return 0;

init_exit:
        /* Undo everything acquired so far; all pointers may be NULL. */
        if (hdmi_work_queue) {
                destroy_workqueue(hdmi_work_queue);
                hdmi_work_queue = NULL;
        }
        if (hdmi_msm_state) {
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
                kfree(hdmi_msm_state->cec_queue_start);
#endif
                kfree(hdmi_msm_state);
                hdmi_msm_state = NULL;
        }

        return rc;
}
/*
 * hdmi_msm_exit() - module exit: unregister device then driver (reverse
 * of registration order in hdmi_msm_init()); hdmi_msm_remove() does the
 * per-device cleanup.
 */
static void __exit hdmi_msm_exit(void)
{
        platform_device_unregister(&this_device);
        platform_driver_unregister(&this_driver);
}
/*
 * set_hdcp_feature_on() - setter for the "hdcp" module parameter.
 *
 * Parses the boolean value, then reconfigures HDCP at runtime when the
 * driver is loaded, the panel is not currently active (checked via the
 * HPD status register) and the requested state actually differs.
 *
 * Returns 0, or the parse error from param_set_bool().
 */
static int set_hdcp_feature_on(const char *val, const struct kernel_param *kp)
{
        int ret;

        ret = param_set_bool(val, kp);
        if (ret)
                return ret;

        pr_debug("%s: HDCP feature = %d\n", __func__, hdcp_feature_on);

        if (!hdmi_msm_state)
                return 0;

        if (HDMI_INP(0x0250) & 0x2) {
                /* Panel is active: refuse to reconfigure HDCP live. */
                pr_err("%s: Unable to set HDCP feature", __func__);
                pr_err("%s: HDMI panel is currently turned on",
                        __func__);
        } else if (hdcp_feature_on != hdmi_msm_state->hdcp_enable) {
                hdmi_msm_config_hdcp_feature();
        }

        return 0;
}
/* "hdcp" module parameter: boolean, runtime-writable by root, routed
 * through set_hdcp_feature_on() so HDCP can be toggled live. */
static struct kernel_param_ops hdcp_feature_on_param_ops = {
        .set = set_hdcp_feature_on,
        .get = param_get_bool,
};

module_param_cb(hdcp, &hdcp_feature_on_param_ops, &hdcp_feature_on,
        S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(hdcp, "Enable or Disable HDCP");

module_init(hdmi_msm_init);
module_exit(hdmi_msm_exit);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.3");
MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
MODULE_DESCRIPTION("HDMI MSM TX driver");
| Gaojiquan/android_kernel_zte_digger | drivers/video/msm/hdmi_msm.c | C | gpl-2.0 | 145,025 |
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#include "errno-util.h"
#include "format-table.h"
#include "hexdecoct.h"
#include "homectl-pkcs11.h"
#include "libcrypt-util.h"
#include "memory-util.h"
#include "openssl-util.h"
#include "pkcs11-util.h"
#include "random-util.h"
#include "strv.h"
/* Results collected by pkcs11_callback(): the PIN that unlocked the token
 * and the X.509 certificate read from it (both owned by this struct, see
 * pkcs11_callback_data_release()). */
struct pkcs11_callback_data {
        char *pin_used;
        X509 *cert;
};
#if HAVE_P11KIT
/* Frees the fields of a pkcs11_callback_data; the PIN is erased from
 * memory before freeing since it is a secret. */
static void pkcs11_callback_data_release(struct pkcs11_callback_data *data) {
        erase_and_free(data->pin_used);
        X509_free(data->cert);
}
/*
 * Callback invoked by pkcs11_find_token() for every token matching the
 * user-supplied URI: logs into the token, reads its X.509 certificate
 * into the callback data, and records the PIN that was used.
 *
 * Returns 1 to stop enumeration on the first match, or a negative errno.
 */
static int pkcs11_callback(
                CK_FUNCTION_LIST *m,
                CK_SESSION_HANDLE session,
                CK_SLOT_ID slot_id,
                const CK_SLOT_INFO *slot_info,
                const CK_TOKEN_INFO *token_info,
                P11KitUri *uri,
                void *userdata) {

        _cleanup_(erase_and_freep) char *pin_used = NULL;
        struct pkcs11_callback_data *data = userdata;
        CK_OBJECT_HANDLE object;
        int r;

        assert(m);
        assert(slot_info);
        assert(token_info);
        assert(uri);
        assert(data);

        /* Called for every token matching our URI */

        r = pkcs11_token_login(m, session, slot_id, token_info, "home directory operation", "user-home", "pkcs11-pin", UINT64_MAX, &pin_used);
        if (r < 0)
                return r;

        /* Locate the certificate object on the token ... */
        r = pkcs11_token_find_x509_certificate(m, session, uri, &object);
        if (r < 0)
                return r;

        /* ... and read it into data->cert (freed by pkcs11_callback_data_release()). */
        r = pkcs11_token_read_x509_certificate(m, session, object, &data->cert);
        if (r < 0)
                return r;

        /* Let's read some random data off the token and write it to the kernel pool before we generate our
         * random key from it. This way we can claim the quality of the RNG is at least as good as the
         * kernel's and the token's pool */
        (void) pkcs11_token_acquire_rng(m, session);

        data->pin_used = TAKE_PTR(pin_used);
        return 1;
}
#endif
/*
 * Finds the PKCS#11 token matching @uri and returns its X.509 certificate
 * plus the PIN used to unlock it.  Ownership of both transfers to the
 * caller.  Returns -ENXIO when no matching token exists, -EOPNOTSUPP on
 * builds without p11-kit, other negative errno on failure.
 */
static int acquire_pkcs11_certificate(
                const char *uri,
                X509 **ret_cert,
                char **ret_pin_used) {

#if HAVE_P11KIT
        _cleanup_(pkcs11_callback_data_release) struct pkcs11_callback_data data = {};
        int r;

        r = pkcs11_find_token(uri, pkcs11_callback, &data);
        if (r == -EAGAIN) /* pkcs11_find_token() doesn't log about this error, but all others */
                return log_error_errno(SYNTHETIC_ERRNO(ENXIO),
                                       "Specified PKCS#11 token with URI '%s' not found.",
                                       uri);
        if (r < 0)
                return r;

        *ret_cert = TAKE_PTR(data.cert);
        *ret_pin_used = TAKE_PTR(data.pin_used);

        return 0;
#else
        return log_error_errno(SYNTHETIC_ERRNO(EOPNOTSUPP),
                               "PKCS#11 tokens not supported on this build.");
#endif
}
/*
 * RSA-encrypts the secret key with the token's public key using PKCS#1
 * padding.  The ciphertext buffer is allocated here and returned to the
 * caller via @ret_encrypt_key/@ret_encrypt_key_size.
 *
 * Fix over the original: the second EVP_PKEY_encrypt() call is the actual
 * encryption, but its failure message claimed the size determination
 * failed; it now reports the encryption failure.
 */
static int encrypt_bytes(
                EVP_PKEY *pkey,
                const void *decrypted_key,
                size_t decrypted_key_size,
                void **ret_encrypt_key,
                size_t *ret_encrypt_key_size) {

        _cleanup_(EVP_PKEY_CTX_freep) EVP_PKEY_CTX *ctx = NULL;
        _cleanup_free_ void *b = NULL;
        size_t l;

        ctx = EVP_PKEY_CTX_new(pkey, NULL);
        if (!ctx)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to allocate public key context");

        if (EVP_PKEY_encrypt_init(ctx) <= 0)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to initialize public key context");

        if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to configure PKCS#1 padding");

        /* First call with a NULL output buffer only determines the required size. */
        if (EVP_PKEY_encrypt(ctx, NULL, &l, decrypted_key, decrypted_key_size) <= 0)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to determine encrypted key size");

        b = malloc(l);
        if (!b)
                return log_oom();

        if (EVP_PKEY_encrypt(ctx, b, &l, decrypted_key, decrypted_key_size) <= 0)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to encrypt key");

        *ret_encrypt_key = TAKE_PTR(b);
        *ret_encrypt_key_size = l;

        return 0;
}
/*
 * Appends an entry to the "pkcs11EncryptedKey" array in the "privileged"
 * section of the user record @v: the token URI, the RSA-encrypted secret
 * key, and a UNIX password hash of the (base64-encoded) plaintext key so
 * the record can be verified without the token.
 */
static int add_pkcs11_encrypted_key(
                JsonVariant **v,
                const char *uri,
                const void *encrypted_key, size_t encrypted_key_size,
                const void *decrypted_key, size_t decrypted_key_size) {

        _cleanup_(json_variant_unrefp) JsonVariant *l = NULL, *w = NULL, *e = NULL;
        _cleanup_(erase_and_freep) char *base64_encoded = NULL, *hashed = NULL;
        int r;

        assert(v);
        assert(uri);
        assert(encrypted_key);
        assert(encrypted_key_size > 0);
        assert(decrypted_key);
        assert(decrypted_key_size > 0);

        /* Before using UNIX hashing on the supplied key we base64 encode it, since crypt_r() and friends
         * expect a NUL terminated string, and we use a binary key */
        r = base64mem(decrypted_key, decrypted_key_size, &base64_encoded);
        if (r < 0)
                return log_error_errno(r, "Failed to base64 encode secret key: %m");

        r = hash_password(base64_encoded, &hashed);
        if (r < 0)
                return log_error_errno(errno_or_else(EINVAL), "Failed to UNIX hash secret key: %m");

        r = json_build(&e, JSON_BUILD_OBJECT(
                               JSON_BUILD_PAIR("uri", JSON_BUILD_STRING(uri)),
                               JSON_BUILD_PAIR("data", JSON_BUILD_BASE64(encrypted_key, encrypted_key_size)),
                               JSON_BUILD_PAIR("hashedPassword", JSON_BUILD_STRING(hashed))));
        if (r < 0)
                return log_error_errno(r, "Failed to build encrypted JSON key object: %m");

        /* Append to the existing array (if any) under privileged.pkcs11EncryptedKey. */
        w = json_variant_ref(json_variant_by_key(*v, "privileged"));
        l = json_variant_ref(json_variant_by_key(w, "pkcs11EncryptedKey"));

        r = json_variant_append_array(&l, e);
        if (r < 0)
                return log_error_errno(r, "Failed append PKCS#11 encrypted key: %m");

        r = json_variant_set_field(&w, "pkcs11EncryptedKey", l);
        if (r < 0)
                return log_error_errno(r, "Failed to set PKCS#11 encrypted key: %m");

        r = json_variant_set_field(v, "privileged", w);
        if (r < 0)
                return log_error_errno(r, "Failed to update privileged field: %m");

        return 0;
}
/*
 * Adds @uri to the public "pkcs11TokenUri" string array of the user
 * record @v, deduplicating: a URI already in the list is a no-op.
 */
static int add_pkcs11_token_uri(JsonVariant **v, const char *uri) {
        _cleanup_(json_variant_unrefp) JsonVariant *w = NULL;
        _cleanup_strv_free_ char **l = NULL;
        int r;

        assert(v);
        assert(uri);

        w = json_variant_ref(json_variant_by_key(*v, "pkcs11TokenUri"));
        if (w) {
                r = json_variant_strv(w, &l);
                if (r < 0)
                        return log_error_errno(r, "Failed to parse PKCS#11 token list: %m");

                if (strv_contains(l, uri))
                        return 0;
        }

        r = strv_extend(&l, uri);
        if (r < 0)
                return log_oom();

        w = json_variant_unref(w);

        r = json_variant_new_array_strv(&w, l);
        if (r < 0)
                return log_error_errno(r, "Failed to create PKCS#11 token URI JSON: %m");

        r = json_variant_set_field(v, "pkcs11TokenUri", w);
        if (r < 0)
                return log_error_errno(r, "Failed to update PKCS#11 token URI list: %m");

        return 0;
}
/*
 * Records @pin in the "tokenPin" array of the "secret" section of the
 * user record @v, deduplicating.  The array is marked sensitive so it is
 * erased/redacted where the JSON library supports it.
 *
 * Returns 1 if the record was modified, 0 if the PIN was empty or
 * already present, negative errno on failure.
 */
int identity_add_token_pin(JsonVariant **v, const char *pin) {
        _cleanup_(json_variant_unrefp) JsonVariant *w = NULL, *l = NULL;
        _cleanup_(strv_free_erasep) char **pins = NULL;
        int r;

        assert(v);

        if (isempty(pin))
                return 0;

        w = json_variant_ref(json_variant_by_key(*v, "secret"));
        l = json_variant_ref(json_variant_by_key(w, "tokenPin"));

        r = json_variant_strv(l, &pins);
        if (r < 0)
                return log_error_errno(r, "Failed to convert PIN array: %m");

        if (strv_find(pins, pin))
                return 0;

        r = strv_extend(&pins, pin);
        if (r < 0)
                return log_oom();

        strv_uniq(pins);

        l = json_variant_unref(l);

        r = json_variant_new_array_strv(&l, pins);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate new PIN array JSON: %m");

        json_variant_sensitive(l);

        r = json_variant_set_field(&w, "tokenPin", l);
        if (r < 0)
                return log_error_errno(r, "Failed to update PIN field: %m");

        r = json_variant_set_field(v, "secret", w);
        if (r < 0)
                return log_error_errno(r, "Failed to update secret object: %m");

        return 1;
}
/*
 * Binds the user record @v to the PKCS#11 token at @uri: generates a
 * random secret key, encrypts it with the token's RSA public key and
 * stores token URI (public), encrypted key + hash (privileged) and the
 * PIN (secret) in the record.
 *
 * Returns 0 on success, negative errno on failure (including tokens
 * whose certificate does not carry an RSA key).
 */
int identity_add_pkcs11_key_data(JsonVariant **v, const char *uri) {
        _cleanup_(erase_and_freep) void *decrypted_key = NULL, *encrypted_key = NULL;
        _cleanup_(erase_and_freep) char *pin = NULL;
        size_t decrypted_key_size, encrypted_key_size;
        _cleanup_(X509_freep) X509 *cert = NULL;
        EVP_PKEY *pkey;
        RSA *rsa;
        int bits;
        int r;

        assert(v);

        r = acquire_pkcs11_certificate(uri, &cert, &pin);
        if (r < 0)
                return r;

        pkey = X509_get0_pubkey(cert);
        if (!pkey)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to extract public key from X.509 certificate.");

        /* Only RSA keys are supported for the encrypt step below. */
        if (EVP_PKEY_base_id(pkey) != EVP_PKEY_RSA)
                return log_error_errno(SYNTHETIC_ERRNO(EBADMSG), "X.509 certificate does not refer to RSA key.");

        rsa = EVP_PKEY_get0_RSA(pkey);
        if (!rsa)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Failed to acquire RSA public key from X.509 certificate.");

        bits = RSA_bits(rsa);
        log_debug("Bits in RSA key: %i", bits);

        /* We use PKCS#1 padding for the RSA cleartext, hence let's leave some extra space for it, hence only
         * generate a random key half the size of the RSA length */
        decrypted_key_size = bits / 8 / 2;

        if (decrypted_key_size < 1)
                return log_error_errno(SYNTHETIC_ERRNO(EIO), "Uh, RSA key size too short?");

        log_debug("Generating %zu bytes random key.", decrypted_key_size);

        decrypted_key = malloc(decrypted_key_size);
        if (!decrypted_key)
                return log_oom();

        r = genuine_random_bytes(decrypted_key, decrypted_key_size, RANDOM_BLOCK);
        if (r < 0)
                return log_error_errno(r, "Failed to generate random key: %m");

        r = encrypt_bytes(pkey, decrypted_key, decrypted_key_size, &encrypted_key, &encrypted_key_size);
        if (r < 0)
                return log_error_errno(r, "Failed to encrypt key: %m");

        /* Add the token URI to the public part of the record. */
        r = add_pkcs11_token_uri(v, uri);
        if (r < 0)
                return r;

        /* Include the encrypted version of the random key we just generated in the privileged part of the record */
        r = add_pkcs11_encrypted_key(
                        v,
                        uri,
                        encrypted_key, encrypted_key_size,
                        decrypted_key, decrypted_key_size);
        if (r < 0)
                return r;

        /* If we acquired the PIN also include it in the secret section of the record, so that systemd-homed
         * can use it if it needs to, given that it likely needs to decrypt the key again to pass to LUKS or
         * fscrypt. */
        r = identity_add_token_pin(v, pin);
        if (r < 0)
                return r;

        return 0;
}
#if HAVE_P11KIT
/*
 * Enumeration callback for list_pkcs11_tokens(): adds one table row
 * (URI, label, manufacturer, model) per inserted hardware token.
 * Returns -EAGAIN to make pkcs11_find_token() continue scanning.
 */
static int list_callback(
                CK_FUNCTION_LIST *m,
                CK_SESSION_HANDLE session,
                CK_SLOT_ID slot_id,
                const CK_SLOT_INFO *slot_info,
                const CK_TOKEN_INFO *token_info,
                P11KitUri *uri,
                void *userdata) {

        _cleanup_free_ char *token_uri_string = NULL, *token_label = NULL, *token_manufacturer_id = NULL, *token_model = NULL;
        _cleanup_(p11_kit_uri_freep) P11KitUri *token_uri = NULL;
        Table *t = userdata;
        int uri_result, r;

        assert(slot_info);
        assert(token_info);

        /* We only care about hardware devices here with a token inserted. Let's filter everything else
         * out. (Note that the user can explicitly specify non-hardware tokens if they like, but during
         * enumeration we'll filter those, since software tokens are typically the system certificate store
         * and such, and it's typically not what people want to bind their home directories to.) */
        if (!FLAGS_SET(token_info->flags, CKF_HW_SLOT|CKF_TOKEN_PRESENT))
                return -EAGAIN;

        token_label = pkcs11_token_label(token_info);
        if (!token_label)
                return log_oom();

        token_manufacturer_id = pkcs11_token_manufacturer_id(token_info);
        if (!token_manufacturer_id)
                return log_oom();

        token_model = pkcs11_token_model(token_info);
        if (!token_model)
                return log_oom();

        token_uri = uri_from_token_info(token_info);
        if (!token_uri)
                return log_oom();

        uri_result = p11_kit_uri_format(token_uri, P11_KIT_URI_FOR_ANY, &token_uri_string);
        if (uri_result != P11_KIT_URI_OK)
                return log_warning_errno(SYNTHETIC_ERRNO(EAGAIN), "Failed to format slot URI: %s", p11_kit_uri_message(uri_result));

        r = table_add_many(
                        t,
                        TABLE_STRING, token_uri_string,
                        TABLE_STRING, token_label,
                        TABLE_STRING, token_manufacturer_id,
                        TABLE_STRING, token_model);
        if (r < 0)
                return table_log_add_error(r);

        return -EAGAIN; /* keep scanning */
}
#endif
/*
 * Prints a table of all inserted hardware PKCS#11 tokens to stdout.
 * Returns 0 (also when no tokens were found), -EOPNOTSUPP without
 * p11-kit support, negative errno on failure.
 */
int list_pkcs11_tokens(void) {
#if HAVE_P11KIT
        _cleanup_(table_unrefp) Table *t = NULL;
        int r;

        t = table_new("uri", "label", "manufacturer", "model");
        if (!t)
                return log_oom();

        /* list_callback() always returns -EAGAIN to scan all tokens. */
        r = pkcs11_find_token(NULL, list_callback, t);
        if (r < 0 && r != -EAGAIN)
                return r;

        /* Only the header row present => nothing was found. */
        if (table_get_rows(t) <= 1) {
                log_info("No suitable PKCS#11 tokens found.");
                return 0;
        }

        r = table_print(t, stdout);
        if (r < 0)
                return log_error_errno(r, "Failed to show device table: %m");

        return 0;
#else
        return log_error_errno(SYNTHETIC_ERRNO(EOPNOTSUPP),
                               "PKCS#11 tokens not supported on this build.");
#endif
}
#if HAVE_P11KIT
/* Callback for pkcs11_find_token(): records the URI of the single matching
 * hardware token into *(char **)userdata. Fails with ENOTUNIQ if a second
 * suitable token shows up, so "auto" selection stays unambiguous. Returns
 * -EAGAIN to keep the enumeration going past unsuitable slots. */
static int auto_callback(
                CK_FUNCTION_LIST *m,
                CK_SESSION_HANDLE session,
                CK_SLOT_ID slot_id,
                const CK_SLOT_INFO *slot_info,
                const CK_TOKEN_INFO *token_info,
                P11KitUri *uri,
                void *userdata) {

        _cleanup_(p11_kit_uri_freep) P11KitUri *token_uri = NULL;
        char **result = userdata;
        int r;

        assert(slot_info);
        assert(token_info);

        /* Only hardware slots with a token actually inserted qualify. */
        if (!FLAGS_SET(token_info->flags, CKF_HW_SLOT|CKF_TOKEN_PRESENT))
                return -EAGAIN;

        /* An earlier invocation already stored a URI → selection is ambiguous. */
        if (*result)
                return log_error_errno(SYNTHETIC_ERRNO(ENOTUNIQ),
                                       "More than one suitable PKCS#11 token found.");

        token_uri = uri_from_token_info(token_info);
        if (!token_uri)
                return log_oom();

        r = p11_kit_uri_format(token_uri, P11_KIT_URI_FOR_ANY, result);
        if (r != P11_KIT_URI_OK)
                return log_warning_errno(SYNTHETIC_ERRNO(EAGAIN), "Failed to format slot URI: %s", p11_kit_uri_message(r));

        return 0;
}
#endif
/* Pick the one suitable PKCS#11 token automatically and return its URI string
 * in *ret (caller frees). Fails with ENODEV when none is found, ENOTUNIQ (via
 * auto_callback) when several are, EOPNOTSUPP without p11-kit support. */
int find_pkcs11_token_auto(char **ret) {
#if HAVE_P11KIT
        int r;

        /* auto_callback() stores the URI in *ret; it returns -EAGAIN for every
         * token it skips, so an overall -EAGAIN result presumably means the scan
         * ran to completion without any match. */
        r = pkcs11_find_token(NULL, auto_callback, ret);
        if (r == -EAGAIN)
                return log_error_errno(SYNTHETIC_ERRNO(ENODEV), "No suitable PKCS#11 tokens found.");
        if (r < 0)
                return r;

        return 0;
#else
        return log_error_errno(SYNTHETIC_ERRNO(EOPNOTSUPP),
                               "PKCS#11 tokens not supported on this build.");
#endif
}
| endlessm/systemd | src/home/homectl-pkcs11.c | C | gpl-2.0 | 16,567 |
/* $Id: elsa_ser.c,v 1.1.1.1 2011/08/19 02:08:59 ronald Exp $
*
* stuff for the serial modem on ELSA cards
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#define MAX_MODEM_BUF 256
#define WAKEUP_CHARS (MAX_MODEM_BUF/2)
#define RS_ISR_PASS_LIMIT 256
#define BASE_BAUD ( 1843200 / 16 )
//#define SERIAL_DEBUG_OPEN 1
//#define SERIAL_DEBUG_INTR 1
//#define SERIAL_DEBUG_FLOW 1
#undef SERIAL_DEBUG_OPEN
#undef SERIAL_DEBUG_INTR
#undef SERIAL_DEBUG_FLOW
#undef SERIAL_DEBUG_REG
//#define SERIAL_DEBUG_REG 1
#ifdef SERIAL_DEBUG_REG
/* Register-name tables for the debug wrappers below (indexed by UART
 * register offset 0..7). */
static u_char deb[32];
const char *ModemIn[] = {"RBR","IER","IIR","LCR","MCR","LSR","MSR","SCR"};
const char *ModemOut[] = {"THR","IER","FCR","LCR","MCR","LSR","MSR","SCR"};
#endif
/* AT command strings for modem setup. Each ends in "\r\0"; the explicit
 * "\0" is redundant (string literals are NUL-terminated anyway, and the
 * strlen() used when sending stops at the first NUL). */
static char *MInit_1 = "AT&F&C1E0&D2\r\0";
static char *MInit_2 = "ATL2M1S64=13\r\0";
static char *MInit_3 = "AT+FCLASS=0\r\0";
static char *MInit_4 = "ATV1S2=128X1\r\0";
static char *MInit_5 = "AT\\V8\\N3\r\0";
static char *MInit_6 = "ATL0M0&G0%E1\r\0";
static char *MInit_7 = "AT%L1%M0%C3\r\0";
static char *MInit_speed28800 = "AT%G0%B28800\r\0";
static char *MInit_dialout = "ATs7=60 x1 d\r\0";
static char *MInit_dialin = "ATs7=60 x1 a\r\0";
/* Read one UART register of the on-board modem; the UART is mapped at
 * card base + 8. With SERIAL_DEBUG_REG the access is also logged. */
static inline unsigned int serial_in(struct IsdnCardState *cs, int offset)
{
#ifdef SERIAL_DEBUG_REG
	u_int val = inb(cs->hw.elsa.base + 8 + offset);
	debugl1(cs,"in %s %02x",ModemIn[offset], val);
	return(val);
#else
	return inb(cs->hw.elsa.base + 8 + offset);
#endif
}
/* "Pausing" variant of serial_in(): uses inb_p() (I/O with a small recovery
 * delay) unless ELSA_SERIAL_NOPAUSE_IO is defined, in which case it is a
 * plain inb() like serial_in(). */
static inline unsigned int serial_inp(struct IsdnCardState *cs, int offset)
{
#ifdef SERIAL_DEBUG_REG
#ifdef ELSA_SERIAL_NOPAUSE_IO
	u_int val = inb(cs->hw.elsa.base + 8 + offset);
	debugl1(cs,"inp %s %02x",ModemIn[offset], val);
#else
	u_int val = inb_p(cs->hw.elsa.base + 8 + offset);
	debugl1(cs,"inP %s %02x",ModemIn[offset], val);
#endif
	return(val);
#else
#ifdef ELSA_SERIAL_NOPAUSE_IO
	return inb(cs->hw.elsa.base + 8 + offset);
#else
	return inb_p(cs->hw.elsa.base + 8 + offset);
#endif
#endif
}
/* Write one UART register of the on-board modem (card base + 8). */
static inline void serial_out(struct IsdnCardState *cs, int offset, int value)
{
#ifdef SERIAL_DEBUG_REG
	debugl1(cs,"out %s %02x",ModemOut[offset], value);
#endif
	outb(value, cs->hw.elsa.base + 8 + offset);
}
/* "Pausing" variant of serial_out(): uses outb_p() unless
 * ELSA_SERIAL_NOPAUSE_IO is defined. */
static inline void serial_outp(struct IsdnCardState *cs, int offset,
			       int value)
{
#ifdef SERIAL_DEBUG_REG
#ifdef ELSA_SERIAL_NOPAUSE_IO
	debugl1(cs,"outp %s %02x",ModemOut[offset], value);
#else
	debugl1(cs,"outP %s %02x",ModemOut[offset], value);
#endif
#endif
#ifdef ELSA_SERIAL_NOPAUSE_IO
	outb(value, cs->hw.elsa.base + 8 + offset);
#else
	outb_p(value, cs->hw.elsa.base + 8 + offset);
#endif
}
/*
 * This routine is called to set the UART divisor registers to match
 * the specified baud rate for a serial port.
 *
 * Always programs 8N1 (cval = 0x03), picks the FIFO trigger level from
 * the effective rate, and makes sure modem-status interrupts stay
 * enabled.  A zero or negative baud rate falls back to 9600 bps instead
 * of dividing by zero (callers currently only pass BASE_BAUD, so this is
 * purely defensive).
 */
static void change_speed(struct IsdnCardState *cs, int baud)
{
	int quot = 0, baud_base;
	unsigned cval, fcr = 0;

	/* byte size and parity: fixed 8 data bits, no parity, 1 stop bit */
	cval = 0x03;
	/* Determine divisor based on baud rate */
	baud_base = BASE_BAUD;
	/* Guard against a division by zero (or a negative divisor). */
	if (baud <= 0)
		baud = 9600;
	quot = baud_base / baud;
	/* If the quotient is ever zero, default to 9600 bps */
	if (!quot)
		quot = baud_base / 9600;
	/* Set up FIFO's: low trigger level at slow rates, 8 bytes otherwise */
	if ((baud_base / quot) < 2400)
		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
	else
		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8;
	serial_outp(cs, UART_FCR, fcr);
	/* keep modem status interrupts enabled (the original cleared and
	 * immediately re-set the same bit; a plain set is equivalent) */
	cs->hw.elsa.IER |= UART_IER_MSI;
	serial_outp(cs, UART_IER, cs->hw.elsa.IER);
	debugl1(cs,"modem quot=0x%x", quot);
	serial_outp(cs, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
	serial_outp(cs, UART_DLL, quot & 0xff);		/* LS of divisor */
	serial_outp(cs, UART_DLM, quot >> 8);		/* MS of divisor */
	serial_outp(cs, UART_LCR, cval);		/* reset DLAB */
	serial_inp(cs, UART_RX);	/* flush one stale RX byte */
}
/*
 * Bring up the modem UART: probe for its presence (LSR reading 0xff means
 * no UART), clear stale interrupt state, raise DTR/RTS/OUT2, enable
 * RX/line-status/modem-status interrupts, reset the software ring buffers
 * and program the default speed.
 * Returns 0 on success, -ENODEV when no UART was detected.
 */
static int mstartup(struct IsdnCardState *cs)
{
	int retval = 0;
	/*
	 * Clear the FIFO buffers and disable them
	 * (they will be reenabled in change_speed())
	 */
	serial_outp(cs, UART_FCR, (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT));
	/*
	 * At this point there's no way the LSR could still be 0xFF;
	 * if it is, then bail out, because there's likely no UART
	 * here.
	 */
	if (serial_inp(cs, UART_LSR) == 0xff) {
		retval = -ENODEV;
		goto errout;
	}
	/*
	 * Clear the interrupt registers.
	 */
	(void) serial_inp(cs, UART_RX);
	(void) serial_inp(cs, UART_IIR);
	(void) serial_inp(cs, UART_MSR);
	/*
	 * Now, initialize the UART
	 */
	serial_outp(cs, UART_LCR, UART_LCR_WLEN8);	/* reset DLAB */
	cs->hw.elsa.MCR = 0;
	cs->hw.elsa.MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
	serial_outp(cs, UART_MCR, cs->hw.elsa.MCR);
	/*
	 * Finally, enable interrupts
	 */
	cs->hw.elsa.IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
	serial_outp(cs, UART_IER, cs->hw.elsa.IER);	/* enable interrupts */
	/*
	 * And clear the interrupt registers again for luck.
	 */
	(void)serial_inp(cs, UART_LSR);
	(void)serial_inp(cs, UART_RX);
	(void)serial_inp(cs, UART_IIR);
	(void)serial_inp(cs, UART_MSR);
	/* empty both software ring buffers */
	cs->hw.elsa.transcnt = cs->hw.elsa.transp = 0;
	cs->hw.elsa.rcvcnt = cs->hw.elsa.rcvp = 0;
	/*
	 * and set the speed of the serial port
	 */
	change_speed(cs, BASE_BAUD);
	cs->hw.elsa.MFlag = 1;	/* modem in command mode */
errout:
	return retval;
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
/* Shut the modem UART down: mask all interrupts, drop OUT2/DTR/RTS,
 * clear any break condition and flush/disable the FIFOs. */
static void mshutdown(struct IsdnCardState *cs)
{
#ifdef SERIAL_DEBUG_OPEN
	printk(KERN_DEBUG"Shutting down serial ....");
#endif
	/*
	 * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
	 * here so the queue might never be waken up
	 */
	cs->hw.elsa.IER = 0;
	serial_outp(cs, UART_IER, 0x00);	/* disable all intrs */
	cs->hw.elsa.MCR &= ~UART_MCR_OUT2;
	/* disable break condition */
	serial_outp(cs, UART_LCR, serial_inp(cs, UART_LCR) & ~UART_LCR_SBC);
	cs->hw.elsa.MCR &= ~(UART_MCR_DTR|UART_MCR_RTS);
	serial_outp(cs, UART_MCR, cs->hw.elsa.MCR);
	/* disable FIFO's */
	serial_outp(cs, UART_FCR, (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT));
	serial_inp(cs, UART_RX);	/* read data port to reset things */
#ifdef SERIAL_DEBUG_OPEN
	printk(" done\n");
#endif
}
/*
 * Copy as much of bcs->tx_skb as fits into the modem transmit ring buffer
 * (hw.elsa.transbuf, MAX_MODEM_BUF bytes, power-of-two size so the write
 * position wraps with a mask).  A copy that crosses the end of the buffer
 * is done in two parts.  Enables the TX-holding-register interrupt once
 * data is queued.  Returns the number of bytes consumed from the skb.
 */
static inline int
write_modem(struct BCState *bcs) {
	int ret = 0;
	struct IsdnCardState *cs = bcs->cs;
	int count, len, fp;
	if (!bcs->tx_skb)
		return 0;
	if (bcs->tx_skb->len <= 0)
		return 0;
	/* clamp to the free space left in the ring buffer */
	len = bcs->tx_skb->len;
	if (len > MAX_MODEM_BUF - cs->hw.elsa.transcnt)
		len = MAX_MODEM_BUF - cs->hw.elsa.transcnt;
	/* fp = write position (read position + fill count, modulo size) */
	fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
	fp &= (MAX_MODEM_BUF - 1);
	count = len;
	if (count > MAX_MODEM_BUF - fp) {
		/* wraps: first part up to the end of the buffer ... */
		count = MAX_MODEM_BUF - fp;
		skb_copy_from_linear_data(bcs->tx_skb,
					  cs->hw.elsa.transbuf + fp, count);
		skb_pull(bcs->tx_skb, count);
		cs->hw.elsa.transcnt += count;
		ret = count;
		count = len - count;
		fp = 0;	/* ... second part restarts at the beginning */
	}
	skb_copy_from_linear_data(bcs->tx_skb,
				  cs->hw.elsa.transbuf + fp, count);
	skb_pull(bcs->tx_skb, count);
	cs->hw.elsa.transcnt += count;
	ret += count;
	/* kick the transmitter if it is not already interrupt-driven */
	if (cs->hw.elsa.transcnt &&
	    !(cs->hw.elsa.IER & UART_IER_THRI)) {
		cs->hw.elsa.IER |= UART_IER_THRI;
		serial_outp(cs, UART_IER, cs->hw.elsa.IER);
	}
	return(ret);
}
/*
 * Push more data towards the modem: continue the current tx_skb if it still
 * has payload; otherwise acknowledge/free it, dequeue the next skb from the
 * send queue and start on that; with nothing left, clear the busy flag and
 * signal B_XMTBUFREADY to the upper layer.
 */
static inline void
modem_fill(struct BCState *bcs) {
	if (bcs->tx_skb) {
		if (bcs->tx_skb->len) {
			write_modem(bcs);
			return;
		} else {
			/* skb fully sent: account for L1 wakeup/ack if wanted */
			if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
			    (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
				u_long flags;
				spin_lock_irqsave(&bcs->aclock, flags);
				bcs->ackcnt += bcs->hw.hscx.count;
				spin_unlock_irqrestore(&bcs->aclock, flags);
				schedule_event(bcs, B_ACKPENDING);
			}
			dev_kfree_skb_any(bcs->tx_skb);
			bcs->tx_skb = NULL;
		}
	}
	if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
		bcs->hw.hscx.count = 0;
		test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
		write_modem(bcs);
	} else {
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		schedule_event(bcs, B_XMTBUFREADY);
	}
}
/*
 * Drain the UART RX FIFO into hw.elsa.rcvbuf.  In data mode (MFlag == 2)
 * the collected bytes are forwarded to the B-channel receive queue as one
 * skb; in command mode they are only hex-dumped to the debug log.
 *
 * Fixes versus the previous version:
 *  - debugl1() is printf-style; passing the assembled buffer directly as
 *    the format string would misinterpret any '%' in the received data.
 *    Pass it through a "%s" format instead.
 *  - tmp[] is 128 bytes while QuickHex() emits 3 characters per dumped
 *    byte; a full 256-byte receive buffer would overflow it.  Cap the
 *    dump at 32 bytes (20-odd bytes of prefix + 96 of hex fits safely).
 */
static inline void receive_chars(struct IsdnCardState *cs,
				 int *status)
{
	unsigned char ch;
	struct sk_buff *skb;

	do {
		ch = serial_in(cs, UART_RX);
		if (cs->hw.elsa.rcvcnt >= MAX_MODEM_BUF)
			break;	/* buffer full: keep draining, drop the rest */
		cs->hw.elsa.rcvbuf[cs->hw.elsa.rcvcnt++] = ch;
#ifdef SERIAL_DEBUG_INTR
		printk("DR%02x:%02x...", ch, *status);
#endif
		if (*status & (UART_LSR_BI | UART_LSR_PE |
			       UART_LSR_FE | UART_LSR_OE)) {
#ifdef SERIAL_DEBUG_INTR
			printk("handling exept....");
#endif
		}
		*status = serial_inp(cs, UART_LSR);
	} while (*status & UART_LSR_DR);
	if (cs->hw.elsa.MFlag == 2) {
		/* data mode: hand everything to the B-channel as one skb */
		if (!(skb = dev_alloc_skb(cs->hw.elsa.rcvcnt)))
			printk(KERN_WARNING "ElsaSER: receive out of memory\n");
		else {
			memcpy(skb_put(skb, cs->hw.elsa.rcvcnt), cs->hw.elsa.rcvbuf,
			       cs->hw.elsa.rcvcnt);
			skb_queue_tail(& cs->hw.elsa.bcs->rqueue, skb);
		}
		schedule_event(cs->hw.elsa.bcs, B_RCVBUFREADY);
	} else {
		char tmp[128];
		char *t = tmp;
		int n = cs->hw.elsa.rcvcnt;

		/* cap the hex dump so it cannot overflow tmp[] */
		if (n > 32)
			n = 32;
		t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
		QuickHex(t, cs->hw.elsa.rcvbuf, n);
		/* never pass received data as a format string */
		debugl1(cs, "%s", tmp);
	}
	cs->hw.elsa.rcvcnt = 0;
}
/*
 * Feed up to 16 bytes from the transmit ring buffer into the UART TX FIFO.
 * Disables the THR interrupt when the buffer runs empty; while in modem
 * data mode (MFlag == 2) refills the ring buffer from the B-channel queue
 * (modem_fill) once it drops below the WAKEUP_CHARS watermark.
 */
static inline void transmit_chars(struct IsdnCardState *cs, int *intr_done)
{
	int count;
	debugl1(cs, "transmit_chars: p(%x) cnt(%x)", cs->hw.elsa.transp,
		cs->hw.elsa.transcnt);
	if (cs->hw.elsa.transcnt <= 0) {
		/* nothing queued: stop TX-holding interrupts */
		cs->hw.elsa.IER &= ~UART_IER_THRI;
		serial_out(cs, UART_IER, cs->hw.elsa.IER);
		return;
	}
	count = 16;	/* at most one FIFO's worth per call */
	do {
		serial_outp(cs, UART_TX, cs->hw.elsa.transbuf[cs->hw.elsa.transp++]);
		if (cs->hw.elsa.transp >= MAX_MODEM_BUF)
			cs->hw.elsa.transp=0;	/* ring-buffer wrap */
		if (--cs->hw.elsa.transcnt <= 0)
			break;
	} while (--count > 0);
	if ((cs->hw.elsa.transcnt < WAKEUP_CHARS) && (cs->hw.elsa.MFlag==2))
		modem_fill(cs->hw.elsa.bcs);
#ifdef SERIAL_DEBUG_INTR
	printk("THRE...");
#endif
	if (intr_done)
		*intr_done = 0;
	if (cs->hw.elsa.transcnt <= 0) {
		cs->hw.elsa.IER &= ~UART_IER_THRI;
		serial_outp(cs, UART_IER, cs->hw.elsa.IER);
	}
}
/*
 * Modem-UART interrupt service loop: dispatch RX and TX work based on the
 * line status register, reading IIR each pass until it reports no pending
 * interrupt (or the RS_ISR_PASS_LIMIT safety limit trips).  A zero low
 * nibble in IIR indicates a modem-status interrupt, which is cleared by
 * reading MSR.
 */
static void rs_interrupt_elsa(struct IsdnCardState *cs)
{
	int status, iir, msr;
	int pass_counter = 0;

#ifdef SERIAL_DEBUG_INTR
	printk(KERN_DEBUG "rs_interrupt_single(%d)...", cs->irq);
#endif
	do {
		status = serial_inp(cs, UART_LSR);
		debugl1(cs,"rs LSR %02x", status);
#ifdef SERIAL_DEBUG_INTR
		printk("status = %x...", status);
#endif
		if (status & UART_LSR_DR)
			receive_chars(cs, &status);
		if (status & UART_LSR_THRE)
			transmit_chars(cs, NULL);
		if (pass_counter++ > RS_ISR_PASS_LIMIT) {
			printk("rs_single loop break.\n");
			break;
		}
		iir = serial_inp(cs, UART_IIR);
		debugl1(cs,"rs IIR %02x", iir);
		if ((iir & 0xf) == 0) {
			/* modem status change: reading MSR acknowledges it */
			msr = serial_inp(cs, UART_MSR);
			debugl1(cs,"rs MSR %02x", msr);
		}
	} while (!(iir & UART_IIR_NO_INT));
#ifdef SERIAL_DEBUG_INTR
	printk("end.\n");
#endif
}
extern int open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs);
extern void modehscx(struct BCState *bcs, int mode, int bc);
extern void hscx_l2l1(struct PStack *st, int pr, void *arg);
/*
 * Close one B-channel: put the HSCX into idle mode and, if the channel was
 * initialized, flush all queues and drop the pending tx_skb.  In modem mode
 * rcvbuf aliases the shared cs->hw.elsa.rcvbuf and therefore must not be
 * freed here (only the pointer is cleared); see setstack_elsa().
 */
static void
close_elsastate(struct BCState *bcs)
{
	modehscx(bcs, 0, bcs->channel);
	if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
		if (bcs->hw.hscx.rcvbuf) {
			if (bcs->mode != L1_MODE_MODEM)
				kfree(bcs->hw.hscx.rcvbuf);
			bcs->hw.hscx.rcvbuf = NULL;
		}
		skb_queue_purge(&bcs->rqueue);
		skb_queue_purge(&bcs->squeue);
		if (bcs->tx_skb) {
			dev_kfree_skb_any(bcs->tx_skb);
			bcs->tx_skb = NULL;
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		}
	}
}
/*
 * Queue a raw command/byte string into the transmit ring buffer (same
 * wrap-around scheme as write_modem()) and enable the TX interrupt.
 * A message that does not fit completely into the remaining free space
 * is silently discarded.
 */
static void
modem_write_cmd(struct IsdnCardState *cs, u_char *buf, int len) {
	int count, fp;
	u_char *msg = buf;

	if (!len)
		return;
	if (len > (MAX_MODEM_BUF - cs->hw.elsa.transcnt)) {
		return;	/* does not fit: drop the whole message */
	}
	/* write position = read position + fill count, modulo buffer size */
	fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
	fp &= (MAX_MODEM_BUF - 1);
	count = len;
	if (count > MAX_MODEM_BUF - fp) {
		/* wraps: copy the tail part first, then restart at offset 0 */
		count = MAX_MODEM_BUF - fp;
		memcpy(cs->hw.elsa.transbuf + fp, msg, count);
		cs->hw.elsa.transcnt += count;
		msg += count;
		count = len - count;
		fp = 0;
	}
	memcpy(cs->hw.elsa.transbuf + fp, msg, count);
	cs->hw.elsa.transcnt += count;
	/* kick the transmitter if it is not already interrupt-driven */
	if (cs->hw.elsa.transcnt &&
	    !(cs->hw.elsa.IER & UART_IER_THRI)) {
		cs->hw.elsa.IER |= UART_IER_THRI;
		serial_outp(cs, UART_IER, cs->hw.elsa.IER);
	}
}
#define RCV_DELAY 20

/*
 * Send one AT command to the modem, busy-wait (up to ~1 second) for the
 * transmit ring buffer to drain, then give the modem RCV_DELAY ms to
 * produce its response.  Factored out of modem_set_init(), which used to
 * repeat this stanza verbatim seven times.
 */
static void
modem_send_and_wait(struct IsdnCardState *cs, char *msg) {
	int timeout = 1000;

	modem_write_cmd(cs, msg, strlen(msg));
	while (timeout-- && cs->hw.elsa.transcnt)
		udelay(1000);
	debugl1(cs, "msi tout=%d", timeout);
	mdelay(RCV_DELAY);
}

/*
 * Run the fixed AT initialization sequence (MInit_1 .. MInit_7) on the
 * modem, one command at a time.
 */
static void
modem_set_init(struct IsdnCardState *cs) {
	modem_send_and_wait(cs, MInit_1);
	modem_send_and_wait(cs, MInit_2);
	modem_send_and_wait(cs, MInit_3);
	modem_send_and_wait(cs, MInit_4);
	modem_send_and_wait(cs, MInit_5);
	modem_send_and_wait(cs, MInit_6);
	modem_send_and_wait(cs, MInit_7);
}
/*
 * Program the connect speed (28800) and then either dial out or answer,
 * depending on `outgoing`.  After each command it busy-waits (up to ~1 s)
 * for the transmit ring buffer to drain, then leaves RCV_DELAY ms for the
 * modem's response.
 */
static void
modem_set_dial(struct IsdnCardState *cs, int outgoing) {
	int timeout;
#define RCV_DELAY 20
	modem_write_cmd(cs, MInit_speed28800, strlen(MInit_speed28800));
	timeout = 1000;
	while(timeout-- && cs->hw.elsa.transcnt)
		udelay(1000);
	debugl1(cs, "msi tout=%d", timeout);
	mdelay(RCV_DELAY);
	if (outgoing)
		modem_write_cmd(cs, MInit_dialout, strlen(MInit_dialout));
	else
		modem_write_cmd(cs, MInit_dialin, strlen(MInit_dialin));
	timeout = 1000;
	while(timeout-- && cs->hw.elsa.transcnt)
		udelay(1000);
	debugl1(cs, "msi tout=%d", timeout);
	mdelay(RCV_DELAY);
}
/*
 * L2 -> L1 entry point for the modem B-channel.
 *  PH_DATA|REQUEST:       queue the skb, or start transmitting it directly
 *                         if the channel is idle (under the card lock).
 *  PH_ACTIVATE|REQUEST:   confirm activation, route audio via ARCOFI,
 *                         start the UART and dial/answer; MFlag = 2 puts
 *                         the receive path into raw data mode.
 *  PH_DEACTIVATE|REQUEST: run the ARCOFI shutdown sequence and return the
 *                         modem to command mode (MFlag = 1).
 */
static void
modem_l2l1(struct PStack *st, int pr, void *arg)
{
	struct BCState *bcs = st->l1.bcs;
	struct sk_buff *skb = arg;
	u_long flags;

	if (pr == (PH_DATA | REQUEST)) {
		spin_lock_irqsave(&bcs->cs->lock, flags);
		if (bcs->tx_skb) {
			skb_queue_tail(&bcs->squeue, skb);
		} else {
			bcs->tx_skb = skb;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			bcs->hw.hscx.count = 0;
			write_modem(bcs);
		}
		spin_unlock_irqrestore(&bcs->cs->lock, flags);
	} else if (pr == (PH_ACTIVATE | REQUEST)) {
		test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
		st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
		set_arcofi(bcs->cs, st->l1.bc);
		mstartup(bcs->cs);
		modem_set_dial(bcs->cs, test_bit(FLG_ORIG, &st->l2.flag));
		bcs->cs->hw.elsa.MFlag=2;	/* raw data mode */
	} else if (pr == (PH_DEACTIVATE | REQUEST)) {
		test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
		bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
		arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
		/* NOTE(review): interruptible_sleep_on() is deprecated and can
		 * miss a wakeup that happens before it sleeps;
		 * wait_event_interruptible() would be the modern replacement. */
		interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
		bcs->cs->hw.elsa.MFlag=1;	/* back to command mode */
	} else {
		printk(KERN_WARNING"ElsaSer: unknown pr %x\n", pr);
	}
}
/*
 * Attach a protocol stack to a B-channel.  HDLC and transparent modes are
 * handed to the HSCX driver; L1_MODE_MODEM wires the channel to the
 * on-board modem instead: it shares the card-level receive buffer
 * (cs->hw.elsa.rcvbuf) and uses modem_l2l1() as its L2->L1 handler.
 * Returns 0 on success, -1 if the HSCX state could not be opened.
 */
static int
setstack_elsa(struct PStack *st, struct BCState *bcs)
{

	bcs->channel = st->l1.bc;
	switch (st->l1.mode) {
	case L1_MODE_HDLC:
	case L1_MODE_TRANS:
		if (open_hscxstate(st->l1.hardware, bcs))
			return (-1);
		st->l2.l2l1 = hscx_l2l1;
		break;
	case L1_MODE_MODEM:
		bcs->mode = L1_MODE_MODEM;
		if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
			/* share the card's modem receive buffer; freed by
			 * release_modem(), not by close_elsastate() */
			bcs->hw.hscx.rcvbuf = bcs->cs->hw.elsa.rcvbuf;
			skb_queue_head_init(&bcs->rqueue);
			skb_queue_head_init(&bcs->squeue);
		}
		bcs->tx_skb = NULL;
		test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		bcs->event = 0;
		bcs->hw.hscx.rcvidx = 0;
		bcs->tx_cnt = 0;
		bcs->cs->hw.elsa.bcs = bcs;
		st->l2.l2l1 = modem_l2l1;
		break;
	}
	st->l1.bcs = bcs;
	setstack_manager(st);
	bcs->st = st;
	setstack_l1_B(st);
	return (0);
}
/*
 * Allocate the modem receive/transmit ring buffers, install the B-channel
 * open/close callbacks, start the UART and run the AT init sequence.
 * On allocation failure it logs a warning and returns with any partially
 * allocated buffer freed again (modem stays unusable).
 */
static void
init_modem(struct IsdnCardState *cs) {

	cs->bcs[0].BC_SetStack = setstack_elsa;
	cs->bcs[1].BC_SetStack = setstack_elsa;
	cs->bcs[0].BC_Close = close_elsastate;
	cs->bcs[1].BC_Close = close_elsastate;
	if (!(cs->hw.elsa.rcvbuf = kmalloc(MAX_MODEM_BUF,
					   GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "Elsa: No modem mem hw.elsa.rcvbuf\n");
		return;
	}
	if (!(cs->hw.elsa.transbuf = kmalloc(MAX_MODEM_BUF,
					     GFP_ATOMIC))) {
		printk(KERN_WARNING
		       "Elsa: No modem mem hw.elsa.transbuf\n");
		kfree(cs->hw.elsa.rcvbuf);
		cs->hw.elsa.rcvbuf = NULL;
		return;
	}
	if (mstartup(cs)) {
		printk(KERN_WARNING "Elsa: problem startup modem\n");
	}
	modem_set_init(cs);
}
/*
 * Tear the modem support down: leave data mode, shut the UART down (only
 * when both buffers were actually allocated, mirroring init_modem()) and
 * release the ring buffers.  Safe to call when init_modem() failed early.
 */
static void
release_modem(struct IsdnCardState *cs) {
	cs->hw.elsa.MFlag = 0;
	if (!cs->hw.elsa.transbuf)
		return;	/* init_modem() never completed: nothing to release */
	if (cs->hw.elsa.rcvbuf) {
		mshutdown(cs);
		kfree(cs->hw.elsa.rcvbuf);
		cs->hw.elsa.rcvbuf = NULL;
	}
	kfree(cs->hw.elsa.transbuf);
	cs->hw.elsa.transbuf = NULL;
}
| embeddedarm/linux-2.6.35-ts4800 | drivers/isdn/hisax/elsa_ser.c | C | gpl-2.0 | 17,033 |
/*
* Multiple format streaming server
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define HAVE_AV_CONFIG_H
#include "avformat.h"
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <errno.h>
#include <sys/time.h>
#undef time //needed because HAVE_AV_CONFIG_H is defined on top
#include <time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <signal.h>
#ifdef CONFIG_HAVE_DLFCN
#include <dlfcn.h>
#endif
#include "ffserver.h"
/* maximum number of simultaneous HTTP connections */
#define HTTP_MAX_CONNECTIONS 2000
/* Per-connection state machine states (HTTP first, then the RTSP-specific
 * ones).  The printable names in http_state[] below are indexed by this
 * enum and must stay in the same order. */
enum HTTPState {
    HTTPSTATE_WAIT_REQUEST,       /* waiting for / reading the request */
    HTTPSTATE_SEND_HEADER,        /* writing the response header */
    HTTPSTATE_SEND_DATA_HEADER,
    HTTPSTATE_SEND_DATA,          /* sending TCP or UDP data */
    HTTPSTATE_SEND_DATA_TRAILER,
    HTTPSTATE_RECEIVE_DATA,       /* receiving feed data from a client */
    HTTPSTATE_WAIT_FEED,          /* wait for data from the feed */
    HTTPSTATE_READY,

    RTSPSTATE_WAIT_REQUEST,
    RTSPSTATE_SEND_REPLY,
    RTSPSTATE_SEND_PACKET,
};
/* Human-readable state names, indexed by enum HTTPState (same order). */
const char *http_state[] = {
    "HTTP_WAIT_REQUEST",
    "HTTP_SEND_HEADER",

    "SEND_DATA_HEADER",
    "SEND_DATA",
    "SEND_DATA_TRAILER",
    "RECEIVE_DATA",
    "WAIT_FEED",
    "READY",

    "RTSP_WAIT_REQUEST",
    "RTSP_SEND_REPLY",
    "RTSP_SEND_PACKET",
};
#define IOBUFFER_INIT_SIZE 8192
/* coef for exponential mean for bitrate estimation in statistics */
#define AVG_COEF 0.9
/* timeouts are in ms */
#define HTTP_REQUEST_TIMEOUT (15 * 1000)
#define RTSP_REQUEST_TIMEOUT (3600 * 24 * 1000)
#define SYNC_TIMEOUT (10 * 1000)
/* Two (byte-count, timestamp) samples used to estimate a connection's data
 * rate: slot 1 is the older sample, slot 2 the newer one.  Maintained by
 * update_datarate(), consumed by compute_datarate(). */
typedef struct {
    int64_t count1, count2;
    long time1, time2;
} DataRateData;
/* context associated with one connection */
typedef struct HTTPContext {
    enum HTTPState state;        /* current state machine state */
    int fd; /* socket file descriptor */
    struct sockaddr_in from_addr; /* origin */
    struct pollfd *poll_entry; /* used when polling */
    long timeout;
    uint8_t *buffer_ptr, *buffer_end;
    int http_error;              /* HTTP status to report (0 = none/success) */
    struct HTTPContext *next;    /* next in the global first_http_ctx list */
    int got_key_frame; /* stream 0 => 1, stream 1 => 2, stream 2=> 4 */
    int64_t data_count;
    /* feed input */
    int feed_fd;
    /* input format handling */
    AVFormatContext *fmt_in;
    long start_time;            /* In milliseconds - this wraps fairly often */
    int64_t first_pts;            /* initial pts value */
    int64_t cur_pts;             /* current pts value from the stream in us */
    int64_t cur_frame_duration;  /* duration of the current frame in us */
    int cur_frame_bytes;       /* output frame size, needed to compute
                                  the time at which we send each
                                  packet */
    int pts_stream_index;        /* stream we choose as clock reference */
    int64_t cur_clock;           /* current clock reference value in us */
    /* output format handling */
    struct FFStream *stream;
    /* -1 is invalid stream */
    int feed_streams[MAX_STREAMS]; /* index of streams in the feed */
    int switch_feed_streams[MAX_STREAMS]; /* index of streams in the feed */
    int switch_pending;
    AVFormatContext fmt_ctx; /* instance of FFStream for one user */
    int last_packet_sent; /* true if last data packet was sent */
    int suppress_log;            /* if set, log_connection() stays silent */
    DataRateData datarate;
    int wmp_client_id;
    char protocol[16];
    char method[16];
    char url[128];
    int buffer_size;
    uint8_t *buffer;
    int is_packetized; /* if true, the stream is packetized */
    int packet_stream_index; /* current stream for output in state machine */

    /* RTSP state specific */
    uint8_t *pb_buffer; /* XXX: use that in all the code */
    ByteIOContext *pb;
    int seq; /* RTSP sequence number */

    /* RTP state specific */
    enum RTSPProtocol rtp_protocol;
    char session_id[32]; /* session id */
    AVFormatContext *rtp_ctx[MAX_STREAMS];

    /* RTP/UDP specific */
    URLContext *rtp_handles[MAX_STREAMS];

    /* RTP/TCP specific */
    struct HTTPContext *rtsp_c;
    uint8_t *packet_buffer, *packet_buffer_ptr, *packet_buffer_end;
} HTTPContext;
static AVFrame dummy_frame;
/* each generated stream is described here */
/* Kind of <Stream> entry served by the server. */
enum StreamType {
    STREAM_TYPE_LIVE,
    STREAM_TYPE_STATUS,
    STREAM_TYPE_REDIRECT,
};

enum IPAddressAction {
    IP_ALLOW = 1,
    IP_DENY,
};

/* One allow/deny rule over an inclusive IPv4 range; rules form a
 * singly-linked list per stream. */
typedef struct IPAddressACL {
    struct IPAddressACL *next;
    enum IPAddressAction action;
    /* These are in host order */
    struct in_addr first;
    struct in_addr last;
} IPAddressACL;
/* description of each stream of the ffserver.conf file */
/* One <Stream> or <Feed> from ffserver.conf.  All streams live on the
 * first_stream list; feeds additionally appear on first_feed. */
typedef struct FFStream {
    enum StreamType stream_type;
    char filename[1024];     /* stream filename */
    struct FFStream *feed;   /* feed we are using (can be null if
                                coming from file) */
    AVFormatParameters *ap_in; /* input parameters */
    AVInputFormat *ifmt;       /* if non NULL, force input format */
    AVOutputFormat *fmt;
    IPAddressACL *acl;         /* access control rules, NULL = allow all */
    int nb_streams;
    int prebuffer;      /* Number of millseconds early to start */
    long max_time;      /* Number of milliseconds to run */
    int send_on_key;
    AVStream *streams[MAX_STREAMS];
    int feed_streams[MAX_STREAMS]; /* index of streams in the feed */
    char feed_filename[1024]; /* file name of the feed storage, or
                                 input file name for a stream */
    char author[512];
    char title[512];
    char copyright[512];
    char comment[512];
    pid_t pid;      /* Of ffmpeg process */
    time_t pid_start;  /* Of ffmpeg process */
    char **child_argv;       /* ffmpeg command line; NULL = no child */
    struct FFStream *next;
    int bandwidth; /* bandwidth, in kbits/s */
    /* RTSP options */
    char *rtsp_option;
    /* multicast specific */
    int is_multicast;
    struct in_addr multicast_ip;
    int multicast_port; /* first port used for multicast */
    int multicast_ttl;
    int loop; /* if true, send the stream in loops (only meaningful if file) */

    /* feed specific */
    int feed_opened;     /* true if someone is writing to the feed */
    int is_feed;         /* true if it is a feed */
    int readonly;        /* True if writing is prohibited to the file */
    int conns_served;
    int64_t bytes_served;
    int64_t feed_max_size;      /* maximum storage size */
    int64_t feed_write_index;   /* current write position in feed (it wraps round) */
    int64_t feed_size;          /* current size of feed */
    struct FFStream *next_feed;
} FFStream;
/* Per-feed statistics. */
typedef struct FeedData {
    long long data_count;
    float avg_frame_size;   /* frame size averaged over last frames with exponential mean */
} FeedData;
struct sockaddr_in my_http_addr;
struct sockaddr_in my_rtsp_addr;
char logfilename[1024];
HTTPContext *first_http_ctx;
FFStream *first_feed; /* contains only feeds */
FFStream *first_stream; /* contains all streams, including feeds */
static void new_connection(int server_fd, int is_rtsp);
static void close_connection(HTTPContext *c);
/* HTTP handling */
static int handle_connection(HTTPContext *c);
static int http_parse_request(HTTPContext *c);
static int http_send_data(HTTPContext *c);
static void compute_stats(HTTPContext *c);
static int open_input_stream(HTTPContext *c, const char *info);
static int http_start_receive_data(HTTPContext *c);
static int http_receive_data(HTTPContext *c);
/* RTSP handling */
static int rtsp_parse_request(HTTPContext *c);
static void rtsp_cmd_describe(HTTPContext *c, const char *url);
static void rtsp_cmd_options(HTTPContext *c, const char *url);
static void rtsp_cmd_setup(HTTPContext *c, const char *url, RTSPHeader *h);
static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h);
static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h);
static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h);
/* SDP handling */
static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
struct in_addr my_ip);
/* RTP handling */
static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
FFStream *stream, const char *session_id,
enum RTSPProtocol rtp_protocol);
static int rtp_new_av_stream(HTTPContext *c,
int stream_index, struct sockaddr_in *dest_addr,
HTTPContext *rtsp_c);
static const char *my_program_name;
static const char *my_program_dir;
static int ffserver_debug;
static int ffserver_daemon;
static int no_launch;
static int need_to_start_children;
int nb_max_connections;
int nb_connections;
int max_bandwidth;
int current_bandwidth;
static long cur_time; // Making this global saves on passing it around everywhere
/* Wall-clock time in milliseconds.  Computed in 64 bits, but truncated to
 * long on return, so on 32-bit longs the value wraps periodically (callers
 * only use differences, see the start_time field comment). */
static long gettime_ms(void)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    return (long long)now.tv_sec * 1000 + now.tv_usec / 1000;
}
static FILE *logfile = NULL;
/* printf-style logger: appends to the global log file (if one is open)
 * and flushes immediately so the log stays current. */
static void __attribute__ ((format (printf, 1, 2))) http_log(const char *fmt, ...)
{
    va_list ap;

    if (!logfile)
        return;

    va_start(ap, fmt);
    vfprintf(logfile, fmt, ap);
    va_end(ap);
    fflush(logfile);
}
/* Format the current time a la ctime(3) into buf2, with the trailing
 * newline stripped.  buf2 must hold at least 26 bytes (ctime's fixed
 * output size).  Returns buf2 for convenience. */
static char *ctime1(char *buf2)
{
    time_t now = time(NULL);
    char *nl;

    strcpy(buf2, ctime(&now));
    nl = strchr(buf2, '\n');
    if (nl)
        *nl = '\0';
    return buf2;
}
/* Write one access-log line (Common-Log-Format-like: peer address, time,
 * request line, status, byte count) for a finished connection, unless
 * logging was suppressed for it.  A zero http_error is logged as 200. */
static void log_connection(HTTPContext *c)
{
    char buf2[32];

    if (c->suppress_log)
        return;

    http_log("%s - - [%s] \"%s %s %s\" %d %lld\n",
             inet_ntoa(c->from_addr.sin_addr),
             ctime1(buf2), c->method, c->url,
             c->protocol, (c->http_error ? c->http_error : 200), c->data_count);
}
/* Record a byte-count sample for rate estimation.  The very first call
 * seeds both sample slots with the current time/count; afterwards the
 * older slot is refreshed from the newer one every 5 seconds, giving a
 * sliding window for compute_datarate(). */
static void update_datarate(DataRateData *drd, int64_t count)
{
    if (!drd->time1 && !drd->count1) {
        /* first sample: seed both slots */
        drd->time1 = drd->time2 = cur_time;
        drd->count1 = drd->count2 = count;
        return;
    }

    if (cur_time - drd->time2 > 5000) {
        /* window expired: age the newer sample into the older slot */
        drd->time1 = drd->time2;
        drd->count1 = drd->count2;
        drd->time2 = cur_time;
        drd->count2 = count;
    }
}
/* Average data rate, in bytes per second, since the older sample;
 * returns 0 when no time has elapsed (avoids a division by zero). */
static int compute_datarate(DataRateData *drd, int64_t count)
{
    long elapsed = cur_time - drd->time1;

    if (elapsed == 0)
        return 0;
    return ((count - drd->count1) * 1000) / elapsed;
}
/* Long-term rate: identical to compute_datarate(), except that the first
 * three seconds of a connection are reported as 0 (grace period — "you get
 * the first 3 seconds flat out"). */
static int get_longterm_datarate(DataRateData *drd, int64_t count)
{
    return (cur_time - drd->time1 < 3000) ? 0 : compute_datarate(drd, count);
}
/*
 * Fork/exec one ffmpeg child per feed that has a command line configured
 * (child_argv) and is not already running.  In the child: close all fds
 * >= 3, optionally redirect stdio to /dev/null, and exec "ffmpeg" from
 * the directory of my_program_name.  Disabled entirely by -n (no_launch).
 */
static void start_children(FFStream *feed)
{
    if (no_launch)
        return;

    for (; feed; feed = feed->next) {
        if (feed->child_argv && !feed->pid) {
            feed->pid_start = time(0);

            feed->pid = fork();

            if (feed->pid < 0) {
                fprintf(stderr, "Unable to create children\n");
                exit(1);
            }
            if (!feed->pid) {
                /* In child */
                char pathname[1024];
                char *slash;
                int i;

                for (i = 3; i < 256; i++) {
                    close(i);
                }

                if (!ffserver_debug) {
                    i = open("/dev/null", O_RDWR);
                    /* if open() returned 0, stdin already is /dev/null
                     * and must be neither duplicated onto itself nor
                     * closed below */
                    if (i)
                        dup2(i, 0);
                    dup2(i, 1);
                    dup2(i, 2);
                    if (i)
                        close(i);
                }

                pstrcpy(pathname, sizeof(pathname), my_program_name);

                /* replace our own basename with "ffmpeg" */
                slash = strrchr(pathname, '/');
                if (!slash) {
                    slash = pathname;
                } else {
                    slash++;
                }
                strcpy(slash, "ffmpeg");

                /* This is needed to make relative pathnames work */
                chdir(my_program_dir);

                signal(SIGPIPE, SIG_DFL);

                execvp(pathname, feed->child_argv);

                _exit(1);
            }
        }
    }
}
/* open a listening socket */
/* Create a non-blocking TCP listening socket bound to *my_addr (with
 * SO_REUSEADDR set and a backlog of 5).  Returns the fd, or -1 on error
 * after printing the cause with perror(). */
static int socket_open_listen(struct sockaddr_in *my_addr)
{
    int server_fd, tmp;

    server_fd = socket(AF_INET,SOCK_STREAM,0);
    if (server_fd < 0) {
        perror ("socket");
        return -1;
    }

    tmp = 1;
    setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));

    if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
        char bindmsg[32];
        snprintf(bindmsg, sizeof(bindmsg), "bind(port %d)", ntohs(my_addr->sin_port));
        perror (bindmsg);
        close(server_fd);
        return -1;
    }

    if (listen (server_fd, 5) < 0) {
        perror ("listen");
        close(server_fd);
        return -1;
    }
    fcntl(server_fd, F_SETFL, O_NONBLOCK);

    return server_fd;
}
/* start all multicast streams */
/* Start all streams configured for multicast: open an RTP connection per
 * stream (auto-assigning ports from 6000 upwards, 100 apart, when the
 * config gave none), open one RTP output per elementary stream on
 * consecutive even port pairs, and switch the connection straight into
 * HTTPSTATE_SEND_DATA. */
static void start_multicast(void)
{
    FFStream *stream;
    char session_id[32];
    HTTPContext *rtp_c;
    struct sockaddr_in dest_addr;
    int default_port, stream_index;

    default_port = 6000;
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (stream->is_multicast) {
            /* open the RTP connection */
            snprintf(session_id, sizeof(session_id),
                     "%08x%08x", (int)random(), (int)random());

            /* choose a port if none given */
            if (stream->multicast_port == 0) {
                stream->multicast_port = default_port;
                default_port += 100;
            }

            dest_addr.sin_family = AF_INET;
            dest_addr.sin_addr = stream->multicast_ip;
            dest_addr.sin_port = htons(stream->multicast_port);

            rtp_c = rtp_new_connection(&dest_addr, stream, session_id,
                                       RTSP_PROTOCOL_RTP_UDP_MULTICAST);
            if (!rtp_c) {
                continue;
            }
            if (open_input_stream(rtp_c, "") < 0) {
                fprintf(stderr, "Could not open input stream for stream '%s'\n",
                        stream->filename);
                continue;
            }

            /* open each RTP stream */
            for(stream_index = 0; stream_index < stream->nb_streams;
                stream_index++) {
                /* each elementary stream gets its own port pair */
                dest_addr.sin_port = htons(stream->multicast_port +
                                           2 * stream_index);
                if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, NULL) < 0) {
                    fprintf(stderr, "Could not open output stream '%s/streamid=%d'\n",
                            stream->filename, stream_index);
                    exit(1);
                }
            }

            /* change state to send data */
            rtp_c->state = HTTPSTATE_SEND_DATA;
        }
    }
}
/* main loop of the http server */
/*
 * Main server loop.  Opens the HTTP and RTSP listening sockets, launches
 * the feed children and the multicast streams, then loops forever:
 * rebuild the poll table (two fixed entries for the listening sockets
 * followed by one entry per connection whose state needs I/O readiness),
 * poll with a timeout capped at 10 ms while any packetized stream is
 * being paced by the server, run every connection's state machine, and
 * accept new connections.  Returns -1 only on fatal setup/poll errors.
 */
static int http_server(void)
{
    int server_fd, ret, rtsp_server_fd, delay, delay1;
    struct pollfd poll_table[HTTP_MAX_CONNECTIONS + 2], *poll_entry;
    HTTPContext *c, *c_next;

    server_fd = socket_open_listen(&my_http_addr);
    if (server_fd < 0)
        return -1;

    rtsp_server_fd = socket_open_listen(&my_rtsp_addr);
    if (rtsp_server_fd < 0)
        return -1;

    http_log("ffserver started.\n");

    start_children(first_feed);

    first_http_ctx = NULL;
    nb_connections = 0;

    start_multicast();

    for(;;) {
        /* slots 0 and 1 are always the HTTP and RTSP listening sockets;
         * new_connection() at the bottom relies on this ordering */
        poll_entry = poll_table;
        poll_entry->fd = server_fd;
        poll_entry->events = POLLIN;
        poll_entry++;

        poll_entry->fd = rtsp_server_fd;
        poll_entry->events = POLLIN;
        poll_entry++;

        /* wait for events on each HTTP handle */
        c = first_http_ctx;
        delay = 1000;
        while (c != NULL) {
            int fd;
            fd = c->fd;
            switch(c->state) {
            case HTTPSTATE_SEND_HEADER:
            case RTSPSTATE_SEND_REPLY:
            case RTSPSTATE_SEND_PACKET:
                c->poll_entry = poll_entry;
                poll_entry->fd = fd;
                poll_entry->events = POLLOUT;
                poll_entry++;
                break;
            case HTTPSTATE_SEND_DATA_HEADER:
            case HTTPSTATE_SEND_DATA:
            case HTTPSTATE_SEND_DATA_TRAILER:
                if (!c->is_packetized) {
                    /* for TCP, we output as much as we can (may need to put a limit) */
                    c->poll_entry = poll_entry;
                    poll_entry->fd = fd;
                    poll_entry->events = POLLOUT;
                    poll_entry++;
                } else {
                    /* when ffserver is doing the timing, we work by
                       looking at which packet need to be sent every
                       10 ms */
                    delay1 = 10; /* one tick wait XXX: 10 ms assumed */
                    if (delay1 < delay)
                        delay = delay1;
                }
                break;
            case HTTPSTATE_WAIT_REQUEST:
            case HTTPSTATE_RECEIVE_DATA:
            case HTTPSTATE_WAIT_FEED:
            case RTSPSTATE_WAIT_REQUEST:
                /* need to catch errors */
                c->poll_entry = poll_entry;
                poll_entry->fd = fd;
                poll_entry->events = POLLIN;/* Maybe this will work */
                poll_entry++;
                break;
            default:
                c->poll_entry = NULL;
                break;
            }
            c = c->next;
        }

        /* wait for an event on one connection. We poll at least every
           second to handle timeouts */
        do {
            ret = poll(poll_table, poll_entry - poll_table, delay);
            if (ret < 0 && errno != EAGAIN && errno != EINTR)
                return -1;
        } while (ret <= 0);

        cur_time = gettime_ms();

        if (need_to_start_children) {
            /* set by the SIGCHLD-style restart logic; relaunch dead feeds */
            need_to_start_children = 0;
            start_children(first_feed);
        }

        /* now handle the events */
        for(c = first_http_ctx; c != NULL; c = c_next) {
            c_next = c->next;   /* saved: handle_connection() may free c */
            if (handle_connection(c) < 0) {
                /* close and free the connection */
                log_connection(c);
                close_connection(c);
            }
        }

        poll_entry = poll_table;
        /* new HTTP connection request ? */
        if (poll_entry->revents & POLLIN) {
            new_connection(server_fd, 0);
        }
        poll_entry++;
        /* new RTSP connection request ? */
        if (poll_entry->revents & POLLIN) {
            new_connection(rtsp_server_fd, 1);
        }
    }
}
/* start waiting for a new HTTP/RTSP request */
static void start_wait_request(HTTPContext *c, int is_rtsp)
{
    /* Reset the request buffer and arm the per-protocol request timeout. */
    c->buffer_ptr = c->buffer;
    /* keep one byte spare so the request can always be NUL terminated */
    c->buffer_end = c->buffer + c->buffer_size - 1;

    c->timeout = cur_time + (is_rtsp ? RTSP_REQUEST_TIMEOUT : HTTP_REQUEST_TIMEOUT);
    c->state = is_rtsp ? RTSPSTATE_WAIT_REQUEST : HTTPSTATE_WAIT_REQUEST;
}
/* Accept a pending connection on server_fd, allocate an HTTPContext for it
   and put it in the WAIT_REQUEST state.  is_rtsp selects the RTSP request
   handling instead of HTTP.  On any failure the socket is closed and the
   partially-built context freed. */
static void new_connection(int server_fd, int is_rtsp)
{
    struct sockaddr_in from_addr;
    socklen_t len;          /* accept() requires socklen_t, not int */
    int fd;
    HTTPContext *c = NULL;

    len = sizeof(from_addr);
    fd = accept(server_fd, (struct sockaddr *)&from_addr,
                &len);
    if (fd < 0)
        return;
    fcntl(fd, F_SETFL, O_NONBLOCK);

    /* XXX: should output a warning page when coming
       close to the connection limit */
    if (nb_connections >= nb_max_connections)
        goto fail;

    /* add a new connection */
    c = av_mallocz(sizeof(HTTPContext));
    if (!c)
        goto fail;

    c->fd = fd;
    c->poll_entry = NULL;
    c->from_addr = from_addr;
    c->buffer_size = IOBUFFER_INIT_SIZE;
    c->buffer = av_malloc(c->buffer_size);
    if (!c->buffer)
        goto fail;

    /* link into the global connection list */
    c->next = first_http_ctx;
    first_http_ctx = c;
    nb_connections++;

    start_wait_request(c, is_rtsp);
    return;

 fail:
    if (c) {
        av_free(c->buffer);
        av_free(c);
    }
    close(fd);
}
/* Tear down a connection: unlink it from the global list, drop any RTSP
   back-references, close the socket, shut down demuxer/parsers, flush and
   free RTP output contexts, and release all buffers. */
static void close_connection(HTTPContext *c)
{
    HTTPContext **cp, *c1;
    int i, nb_streams;
    AVFormatContext *ctx;
    URLContext *h;
    AVStream *st;

    /* remove connection from list */
    cp = &first_http_ctx;
    while ((*cp) != NULL) {
        c1 = *cp;
        if (c1 == c) {
            *cp = c->next;
        } else {
            cp = &c1->next;
        }
    }

    /* remove references, if any (XXX: do it faster) */
    for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
        if (c1->rtsp_c == c)
            c1->rtsp_c = NULL;
    }

    /* remove connection associated resources */
    if (c->fd >= 0)
        close(c->fd);
    if (c->fmt_in) {
        /* close each frame parser */
        for(i=0;i<c->fmt_in->nb_streams;i++) {
            st = c->fmt_in->streams[i];
            if (st->codec.codec) {
                avcodec_close(&st->codec);
            }
        }
        av_close_input_file(c->fmt_in);
    }

    /* free RTP output streams if any */
    nb_streams = 0;
    if (c->stream)
        nb_streams = c->stream->nb_streams;

    for(i=0;i<nb_streams;i++) {
        ctx = c->rtp_ctx[i];
        if (ctx) {
            av_write_trailer(ctx);
            av_free(ctx);
        }
        h = c->rtp_handles[i];
        if (h) {
            url_close(h);
        }
    }

    ctx = &c->fmt_ctx;

    /* if output was not fully sent, still write the muxer trailer into a
       dynamic buffer so the format state is wound down cleanly */
    if (!c->last_packet_sent) {
        if (ctx->oformat) {
            /* prepare header */
            if (url_open_dyn_buf(&ctx->pb) >= 0) {
                av_write_trailer(ctx);
                url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
            }
        }
    }

    for(i=0; i<ctx->nb_streams; i++)
        av_free(ctx->streams[i]) ;

    /* give back the bandwidth this client was accounted for */
    if (c->stream)
        current_bandwidth -= c->stream->bandwidth;
    av_freep(&c->pb_buffer);
    av_freep(&c->packet_buffer);
    av_free(c->buffer);
    av_free(c);
    nb_connections--;
}
/* Drive one connection's state machine after poll().  Returns 0 to keep the
   connection, -1 to have the caller close it. */
static int handle_connection(HTTPContext *c)
{
    int len, ret;

    switch(c->state) {
    case HTTPSTATE_WAIT_REQUEST:
    case RTSPSTATE_WAIT_REQUEST:
        /* timeout ? */
        if ((c->timeout - cur_time) < 0)
            return -1;
        if (c->poll_entry->revents & (POLLERR | POLLHUP))
            return -1;

        /* no need to read if no events */
        if (!(c->poll_entry->revents & POLLIN))
            return 0;
        /* read the data */
        /* NOTE: the request is read one byte at a time and re-scanned for
           the terminator after each byte; the goto loops until EAGAIN */
    read_loop:
        len = read(c->fd, c->buffer_ptr, 1);
        if (len < 0) {
            if (errno != EAGAIN && errno != EINTR)
                return -1;
        } else if (len == 0) {
            /* peer closed the connection */
            return -1;
        } else {
            /* search for end of request. */
            uint8_t *ptr;
            c->buffer_ptr += len;
            ptr = c->buffer_ptr;
            /* a request ends with a blank line: LF LF or CR LF CR LF */
            if ((ptr >= c->buffer + 2 && !memcmp(ptr-2, "\n\n", 2)) ||
                (ptr >= c->buffer + 4 && !memcmp(ptr-4, "\r\n\r\n", 4))) {
                /* request found : parse it and reply */
                if (c->state == HTTPSTATE_WAIT_REQUEST) {
                    ret = http_parse_request(c);
                } else {
                    ret = rtsp_parse_request(c);
                }
                if (ret < 0)
                    return -1;
            } else if (ptr >= c->buffer_end) {
                /* request too long: cannot do anything */
                return -1;
            } else goto read_loop;
        }
        break;

    case HTTPSTATE_SEND_HEADER:
        if (c->poll_entry->revents & (POLLERR | POLLHUP))
            return -1;

        /* no need to write if no events */
        if (!(c->poll_entry->revents & POLLOUT))
            return 0;
        len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
        if (len < 0) {
            if (errno != EAGAIN && errno != EINTR) {
                /* error : close connection */
                av_freep(&c->pb_buffer);
                return -1;
            }
        } else {
            c->buffer_ptr += len;
            if (c->stream)
                c->stream->bytes_served += len;
            c->data_count += len;
            if (c->buffer_ptr >= c->buffer_end) {
                av_freep(&c->pb_buffer);
                /* if error, exit */
                if (c->http_error) {
                    return -1;
                }
                /* all the buffer was sent : synchronize to the incoming stream */
                c->state = HTTPSTATE_SEND_DATA_HEADER;
                c->buffer_ptr = c->buffer_end = c->buffer;
            }
        }
        break;

    case HTTPSTATE_SEND_DATA:
    case HTTPSTATE_SEND_DATA_HEADER:
    case HTTPSTATE_SEND_DATA_TRAILER:
        /* for packetized output, we consider we can always write (the
           input streams sets the speed). It may be better to verify
           that we do not rely too much on the kernel queues */
        if (!c->is_packetized) {
            if (c->poll_entry->revents & (POLLERR | POLLHUP))
                return -1;

            /* no need to read if no events */
            if (!(c->poll_entry->revents & POLLOUT))
                return 0;
        }
        if (http_send_data(c) < 0)
            return -1;
        break;
    case HTTPSTATE_RECEIVE_DATA:
        /* no need to read if no events */
        if (c->poll_entry->revents & (POLLERR | POLLHUP))
            return -1;
        if (!(c->poll_entry->revents & POLLIN))
            return 0;
        if (http_receive_data(c) < 0)
            return -1;
        break;
    case HTTPSTATE_WAIT_FEED:
        /* no need to read if no events */
        if (c->poll_entry->revents & (POLLIN | POLLERR | POLLHUP))
            return -1;

        /* nothing to do, we'll be woken up by incoming feed packets */
        break;

    case RTSPSTATE_SEND_REPLY:
        if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
            av_freep(&c->pb_buffer);
            return -1;
        }
        /* no need to write if no events */
        if (!(c->poll_entry->revents & POLLOUT))
            return 0;
        len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
        if (len < 0) {
            if (errno != EAGAIN && errno != EINTR) {
                /* error : close connection */
                av_freep(&c->pb_buffer);
                return -1;
            }
        } else {
            c->buffer_ptr += len;
            c->data_count += len;
            if (c->buffer_ptr >= c->buffer_end) {
                /* all the buffer was sent : wait for a new request */
                av_freep(&c->pb_buffer);
                start_wait_request(c, 1);
            }
        }
        break;
    case RTSPSTATE_SEND_PACKET:
        if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
            av_freep(&c->packet_buffer);
            return -1;
        }
        /* no need to write if no events */
        if (!(c->poll_entry->revents & POLLOUT))
            return 0;
        len = write(c->fd, c->packet_buffer_ptr,
                    c->packet_buffer_end - c->packet_buffer_ptr);
        if (len < 0) {
            if (errno != EAGAIN && errno != EINTR) {
                /* error : close connection */
                av_freep(&c->packet_buffer);
                return -1;
            }
        } else {
            c->packet_buffer_ptr += len;
            if (c->packet_buffer_ptr >= c->packet_buffer_end) {
                /* all the buffer was sent : wait for a new request */
                av_freep(&c->packet_buffer);
                c->state = RTSPSTATE_WAIT_REQUEST;
            }
        }
        break;
    case HTTPSTATE_READY:
        /* nothing to do */
        break;
    default:
        return -1;
    }
    return 0;
}
/* Parse a WMP "Pragma: stream-switch-entry=:<stream>:<rate> ..." header out
   of `request`.  On success, fills rates[stream-1] with the requested rate
   for each entry (other slots are set to 0xff) and returns 1; returns 0 when
   no such header is present.  Fix: <ctype.h> classification functions must
   not be given a plain char that may be negative — cast through
   unsigned char to avoid undefined behavior on high-bit bytes. */
static int extract_rates(char *rates, int ratelen, const char *request)
{
    const char *p;

    /* scan each header line of the request */
    for (p = request; *p && *p != '\r' && *p != '\n'; ) {
        if (strncasecmp(p, "Pragma:", 7) == 0) {
            const char *q = p + 7;

            while (*q && *q != '\n' && isspace((unsigned char)*q))
                q++;

            if (strncasecmp(q, "stream-switch-entry=", 20) == 0) {
                int stream_no;
                int rate_no;

                q += 20;
                /* 0xff marks "no request" for a stream */
                memset(rates, 0xff, ratelen);

                for (;;) {
                    /* each entry is a ":<stream>:<rate>" token */
                    while (*q && *q != '\n' && *q != ':')
                        q++;
                    if (sscanf(q, ":%d:%d", &stream_no, &rate_no) != 2) {
                        break;
                    }
                    stream_no--;        /* entries are 1-based */
                    if (stream_no < ratelen && stream_no >= 0) {
                        rates[stream_no] = rate_no;
                    }
                    /* skip the rest of this token */
                    while (*q && *q != '\n' && !isspace((unsigned char)*q))
                        q++;
                }
                return 1;
            }
        }
        p = strchr(p, '\n');
        if (!p)
            break;
        p++;
    }
    return 0;
}
/* Pick the feed stream matching `codec` (same codec id, sample rate and
   frame size) whose bitrate best fits `bit_rate`: the fastest stream not
   above it, or failing that the slowest one above it.  Returns the stream
   index, or -1 when nothing matches. */
static int find_stream_in_feed(FFStream *feed, AVCodecContext *codec, int bit_rate)
{
    int best = -1;
    int best_bitrate = 100000000;
    int i;

    for (i = 0; i < feed->nb_streams; i++) {
        AVCodecContext *fc = &feed->streams[i]->codec;

        /* candidate must match codec parameters exactly */
        if (fc->codec_id != codec->codec_id ||
            fc->sample_rate != codec->sample_rate ||
            fc->width != codec->width ||
            fc->height != codec->height)
            continue;

        if (fc->bit_rate <= bit_rate) {
            /* at or below the target: prefer the fastest such stream */
            if (best_bitrate > bit_rate || fc->bit_rate > best_bitrate) {
                best_bitrate = fc->bit_rate;
                best = i;
            }
        } else if (fc->bit_rate < best_bitrate) {
            /* above the target: prefer the slowest such stream */
            best_bitrate = fc->bit_rate;
            best = i;
        }
    }
    return best;
}
/* Apply WMP rate-switch requests (as filled in by extract_rates()) to the
   connection: for each stream, rates[i] == 0 keeps the current feed stream,
   1 requests roughly half the bitrate, 2 a quarter (or off).  Returns 1 when
   at least one stream actually needs to be switched, 0 otherwise. */
static int modify_current_stream(HTTPContext *c, char *rates)
{
    int i;
    FFStream *req = c->stream;
    int action_required = 0;

    /* Not much we can do for a feed */
    if (!req->feed)
        return 0;

    for (i = 0; i < req->nb_streams; i++) {
        AVCodecContext *codec = &req->streams[i]->codec;

        switch(rates[i]) {
            case 0:
                /* keep the currently assigned feed stream */
                c->switch_feed_streams[i] = req->feed_streams[i];
                break;
            case 1:
                /* request roughly half the bitrate */
                c->switch_feed_streams[i] = find_stream_in_feed(req->feed, codec, codec->bit_rate / 2);
                break;
            case 2:
                /* Wants off or slow */
                c->switch_feed_streams[i] = find_stream_in_feed(req->feed, codec, codec->bit_rate / 4);
#ifdef WANTS_OFF
                /* This doesn't work well when it turns off the only stream! */
                c->switch_feed_streams[i] = -2;
                c->feed_streams[i] = -2;
#endif
                break;
        }

        if (c->switch_feed_streams[i] >= 0 && c->switch_feed_streams[i] != c->feed_streams[i])
            action_required = 1;
    }

    return action_required;
}
/* Commit a pending stream switch for stream i, then clear the request. */
static void do_switch_stream(HTTPContext *c, int i)
{
    int target = c->switch_feed_streams[i];

    if (target >= 0) {
#ifdef PHILIP
        c->feed_streams[i] = target;
#endif

        /* Now update the stream */
    }

    c->switch_feed_streams[i] = -1;
}
/* XXX: factorize in utils.c ? */
/* XXX: take care with different space meaning */
/* Advance *pp past any run of spaces and tabs (only ' ' and '\t'). */
static void skip_spaces(const char **pp)
{
    const char *s = *pp;

    for (; *s == ' ' || *s == '\t'; s++)
        ;
    *pp = s;
}
/* Copy the next whitespace-delimited word from *pp into buf (at most
   buf_size-1 chars, always NUL-terminated when buf_size > 0) and advance
   *pp past the word.  Leading spaces/tabs are skipped first.
   Fix: isspace() must not be given a plain char that may be negative —
   cast through unsigned char to avoid undefined behavior. */
static void get_word(char *buf, int buf_size, const char **pp)
{
    const char *p;
    char *q;

    p = *pp;
    /* skip leading whitespace: same ' '/'\t' set as skip_spaces() */
    while (*p == ' ' || *p == '\t')
        p++;

    q = buf;
    while (!isspace((unsigned char)*p) && *p != '\0') {
        /* copy while there is room, but keep consuming the word either way */
        if ((q - buf) < buf_size - 1)
            *q++ = *p;
        p++;
    }
    if (buf_size > 0)
        *q = '\0';
    *pp = p;
}
/* Check the client address of connection c against the stream's ACL.
   The first range containing the address decides (1 = allowed).  When no
   rule matches, the inverse of the last rule's action applies; an empty
   list therefore allows everyone. */
static int validate_acl(FFStream *stream, HTTPContext *c)
{
    IPAddressACL *acl;
    enum IPAddressAction last_action = IP_DENY;
    unsigned long src_addr = ntohl(c->from_addr.sin_addr.s_addr);

    for (acl = stream->acl; acl; acl = acl->next) {
        if (src_addr >= acl->first.s_addr && src_addr <= acl->last.s_addr)
            return acl->action == IP_ALLOW;
        last_action = acl->action;
    }

    /* Nothing matched, so return not the last action */
    return last_action == IP_DENY;
}
/* compute the real filename of a file by matching it without its
   extensions to all the stream filenames */
static void compute_real_filename(char *filename, int max_size)
{
    char stripped[1024];
    char candidate[1024];
    char *dot;
    FFStream *stream;

    /* strip the extension from the requested name */
    pstrcpy(stripped, sizeof(stripped), filename);
    dot = strrchr(stripped, '.');
    if (dot)
        *dot = '\0';

    /* find the first stream whose extension-less name matches and
       substitute its real filename */
    for (stream = first_stream; stream != NULL; stream = stream->next) {
        pstrcpy(candidate, sizeof(candidate), stream->filename);
        dot = strrchr(candidate, '.');
        if (dot)
            *dot = '\0';
        if (strcmp(stripped, candidate) == 0) {
            pstrcpy(filename, max_size, stream->filename);
            break;
        }
    }
}
/* kind of redirector/playlist document to serve instead of the raw stream */
enum RedirType {
    REDIR_NONE, /* no redirection: serve the stream itself */
    REDIR_ASX,  /* Windows Media ASX playlist */
    REDIR_RAM,  /* RealAudio .ram playlist */
    REDIR_ASF,  /* ASF [Reference] redirector file */
    REDIR_RTSP, /* plain rtsp:// URL document */
    REDIR_SDP,  /* SDP session description */
};
/* parse http request and prepare header */
/* Parse the HTTP request accumulated in c->buffer, resolve the target
   stream, and build the response (header, redirector document, or error
   page) in c->buffer.  Returns 0 on success (state machine advanced),
   -1 when the connection must be closed.
   Fix: every snprintf() previously used "q - buffer + buffer_size" as its
   size, an expression that GROWS as q advances and so never bounded the
   writes; the bound is now the space remaining up to buf_end.  Also uses
   socklen_t for getsockname() and casts isspace()'s argument. */
static int http_parse_request(HTTPContext *c)
{
    char *p;
    int post;
    enum RedirType redir_type;
    char cmd[32];
    char info[1024], *filename;
    char url[1024], *q;
    char protocol[32];
    char msg[1024];
    const char *mime_type;
    FFStream *stream;
    int i;
    char ratebuf[32];
    char *useragent = 0;
    /* one past the end of the response buffer; snprintf bound is buf_end - q.
       NOTE: if a response were ever to exceed the buffer, q could still land
       on buf_end after truncation — responses here are far smaller than
       IOBUFFER_INIT_SIZE */
    char *buf_end = (char *) c->buffer + c->buffer_size;

    /* request line: METHOD URL PROTOCOL */
    p = c->buffer;
    get_word(cmd, sizeof(cmd), (const char **)&p);
    pstrcpy(c->method, sizeof(c->method), cmd);

    if (!strcmp(cmd, "GET"))
        post = 0;
    else if (!strcmp(cmd, "POST"))
        post = 1;
    else
        return -1;

    get_word(url, sizeof(url), (const char **)&p);
    pstrcpy(c->url, sizeof(c->url), url);

    get_word(protocol, sizeof(protocol), (const char **)&p);
    if (strcmp(protocol, "HTTP/1.0") && strcmp(protocol, "HTTP/1.1"))
        return -1;
    pstrcpy(c->protocol, sizeof(c->protocol), protocol);

    /* find the filename and the optional info string in the request */
    p = url;
    if (*p == '/')
        p++;
    filename = p;
    p = strchr(p, '?');
    if (p) {
        pstrcpy(info, sizeof(info), p);
        *p = '\0';
    } else {
        info[0] = '\0';
    }

    /* locate the User-Agent header (used below to detect WMP) */
    for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
        if (strncasecmp(p, "User-Agent:", 11) == 0) {
            useragent = p + 11;
            if (*useragent && *useragent != '\n' && isspace((unsigned char)*useragent))
                useragent++;
            break;
        }
        p = strchr(p, '\n');
        if (!p)
            break;
        p++;
    }

    /* decide whether a redirector/playlist document should be served
       instead of the stream itself, based on the requested extension */
    redir_type = REDIR_NONE;
    if (match_ext(filename, "asx")) {
        redir_type = REDIR_ASX;
        filename[strlen(filename)-1] = 'f';   /* .asx -> .asf */
    } else if (match_ext(filename, "asf") &&
        (!useragent || strncasecmp(useragent, "NSPlayer", 8) != 0)) {
        /* if this isn't WMP or lookalike, return the redirector file */
        redir_type = REDIR_ASF;
    } else if (match_ext(filename, "rpm,ram")) {
        redir_type = REDIR_RAM;
        strcpy(filename + strlen(filename)-2, "m");   /* -> .rm */
    } else if (match_ext(filename, "rtsp")) {
        redir_type = REDIR_RTSP;
        compute_real_filename(filename, sizeof(url) - 1);
    } else if (match_ext(filename, "sdp")) {
        redir_type = REDIR_SDP;
        compute_real_filename(filename, sizeof(url) - 1);
    }

    /* find the stream, respecting its ACL */
    stream = first_stream;
    while (stream != NULL) {
        if (!strcmp(stream->filename, filename) && validate_acl(stream, c))
            break;
        stream = stream->next;
    }
    if (stream == NULL) {
        snprintf(msg, sizeof(msg), "File '%s' not found", url);
        goto send_error;
    }

    c->stream = stream;
    memcpy(c->feed_streams, stream->feed_streams, sizeof(c->feed_streams));
    memset(c->switch_feed_streams, -1, sizeof(c->switch_feed_streams));

    if (stream->stream_type == STREAM_TYPE_REDIRECT) {
        c->http_error = 301;
        q = c->buffer;
        q += snprintf(q, buf_end - q, "HTTP/1.0 301 Moved\r\n");
        q += snprintf(q, buf_end - q, "Location: %s\r\n", stream->feed_filename);
        q += snprintf(q, buf_end - q, "Content-type: text/html\r\n");
        q += snprintf(q, buf_end - q, "\r\n");
        q += snprintf(q, buf_end - q, "<html><head><title>Moved</title></head><body>\r\n");
        q += snprintf(q, buf_end - q, "You should be <a href=\"%s\">redirected</a>.\r\n", stream->feed_filename);
        q += snprintf(q, buf_end - q, "</body></html>\r\n");

        /* prepare output buffer */
        c->buffer_ptr = c->buffer;
        c->buffer_end = q;
        c->state = HTTPSTATE_SEND_HEADER;
        return 0;
    }

    /* If this is WMP, get the rate information */
    if (extract_rates(ratebuf, sizeof(ratebuf), c->buffer)) {
        if (modify_current_stream(c, ratebuf)) {
            for (i = 0; i < sizeof(c->feed_streams) / sizeof(c->feed_streams[0]); i++) {
                if (c->switch_feed_streams[i] >= 0)
                    do_switch_stream(c, i);
            }
        }
    }

    if (post == 0 && stream->stream_type == STREAM_TYPE_LIVE) {
        current_bandwidth += stream->bandwidth;
    }

    /* refuse new viewers when over the configured bandwidth budget */
    if (post == 0 && max_bandwidth < current_bandwidth) {
        c->http_error = 200;
        q = c->buffer;
        q += snprintf(q, buf_end - q, "HTTP/1.0 200 Server too busy\r\n");
        q += snprintf(q, buf_end - q, "Content-type: text/html\r\n");
        q += snprintf(q, buf_end - q, "\r\n");
        q += snprintf(q, buf_end - q, "<html><head><title>Too busy</title></head><body>\r\n");
        q += snprintf(q, buf_end - q, "The server is too busy to serve your request at this time.<p>\r\n");
        q += snprintf(q, buf_end - q, "The bandwidth being served (including your stream) is %dkbit/sec, and this exceeds the limit of %dkbit/sec\r\n",
            current_bandwidth, max_bandwidth);
        q += snprintf(q, buf_end - q, "</body></html>\r\n");

        /* prepare output buffer */
        c->buffer_ptr = c->buffer;
        c->buffer_end = q;
        c->state = HTTPSTATE_SEND_HEADER;
        return 0;
    }

    if (redir_type != REDIR_NONE) {
        char *hostinfo = 0;

        /* the redirector documents embed an absolute URL, so we need the
           Host header */
        for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
            if (strncasecmp(p, "Host:", 5) == 0) {
                hostinfo = p + 5;
                break;
            }
            p = strchr(p, '\n');
            if (!p)
                break;
            p++;
        }

        if (hostinfo) {
            char *eoh;
            char hostbuf[260];

            while (isspace((unsigned char)*hostinfo))
                hostinfo++;

            eoh = strchr(hostinfo, '\n');
            if (eoh) {
                if (eoh[-1] == '\r')
                    eoh--;

                if (eoh - hostinfo < sizeof(hostbuf) - 1) {
                    memcpy(hostbuf, hostinfo, eoh - hostinfo);
                    hostbuf[eoh - hostinfo] = 0;

                    c->http_error = 200;
                    q = c->buffer;
                    switch(redir_type) {
                    case REDIR_ASX:
                        q += snprintf(q, buf_end - q, "HTTP/1.0 200 ASX Follows\r\n");
                        q += snprintf(q, buf_end - q, "Content-type: video/x-ms-asf\r\n");
                        q += snprintf(q, buf_end - q, "\r\n");
                        q += snprintf(q, buf_end - q, "<ASX Version=\"3\">\r\n");
                        //q += snprintf(q, buf_end - q, "<!-- Autogenerated by ffserver -->\r\n");
                        q += snprintf(q, buf_end - q, "<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n",
                                hostbuf, filename, info);
                        q += snprintf(q, buf_end - q, "</ASX>\r\n");
                        break;
                    case REDIR_RAM:
                        q += snprintf(q, buf_end - q, "HTTP/1.0 200 RAM Follows\r\n");
                        q += snprintf(q, buf_end - q, "Content-type: audio/x-pn-realaudio\r\n");
                        q += snprintf(q, buf_end - q, "\r\n");
                        q += snprintf(q, buf_end - q, "# Autogenerated by ffserver\r\n");
                        q += snprintf(q, buf_end - q, "http://%s/%s%s\r\n",
                                hostbuf, filename, info);
                        break;
                    case REDIR_ASF:
                        q += snprintf(q, buf_end - q, "HTTP/1.0 200 ASF Redirect follows\r\n");
                        q += snprintf(q, buf_end - q, "Content-type: video/x-ms-asf\r\n");
                        q += snprintf(q, buf_end - q, "\r\n");
                        q += snprintf(q, buf_end - q, "[Reference]\r\n");
                        q += snprintf(q, buf_end - q, "Ref1=http://%s/%s%s\r\n",
                                hostbuf, filename, info);
                        break;
                    case REDIR_RTSP:
                        {
                            char hostname[256], *p;
                            /* extract only hostname */
                            pstrcpy(hostname, sizeof(hostname), hostbuf);
                            p = strrchr(hostname, ':');
                            if (p)
                                *p = '\0';
                            q += snprintf(q, buf_end - q, "HTTP/1.0 200 RTSP Redirect follows\r\n");
                            /* XXX: incorrect mime type ? */
                            q += snprintf(q, buf_end - q, "Content-type: application/x-rtsp\r\n");
                            q += snprintf(q, buf_end - q, "\r\n");
                            q += snprintf(q, buf_end - q, "rtsp://%s:%d/%s\r\n",
                                         hostname, ntohs(my_rtsp_addr.sin_port),
                                         filename);
                        }
                        break;
                    case REDIR_SDP:
                        {
                            uint8_t *sdp_data;
                            int sdp_data_size;
                            socklen_t len;   /* getsockname() needs socklen_t */
                            struct sockaddr_in my_addr;

                            q += snprintf(q, buf_end - q, "HTTP/1.0 200 OK\r\n");
                            q += snprintf(q, buf_end - q, "Content-type: application/sdp\r\n");
                            q += snprintf(q, buf_end - q, "\r\n");

                            len = sizeof(my_addr);
                            getsockname(c->fd, (struct sockaddr *)&my_addr, &len);

                            /* XXX: should use a dynamic buffer */
                            sdp_data_size = prepare_sdp_description(stream,
                                                                    &sdp_data,
                                                                    my_addr.sin_addr);
                            if (sdp_data_size > 0) {
                                memcpy(q, sdp_data, sdp_data_size);
                                q += sdp_data_size;
                                *q = '\0';
                                av_free(sdp_data);
                            }
                        }
                        break;
                    default:
                        av_abort();
                        break;
                    }

                    /* prepare output buffer */
                    c->buffer_ptr = c->buffer;
                    c->buffer_end = q;
                    c->state = HTTPSTATE_SEND_HEADER;
                    return 0;
                }
            }
        }

        snprintf(msg, sizeof(msg), "ASX/RAM file not handled");
        goto send_error;
    }

    stream->conns_served++;

    /* XXX: add there authenticate and IP match */

    if (post) {
        /* if post, it means a feed is being sent */
        if (!stream->is_feed) {
            /* However it might be a status report from WMP! Lets log the data
             * as it might come in handy one day
             */
            char *logline = 0;
            int client_id = 0;

            for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
                if (strncasecmp(p, "Pragma: log-line=", 17) == 0) {
                    logline = p;
                    break;
                }
                if (strncasecmp(p, "Pragma: client-id=", 18) == 0) {
                    client_id = strtol(p + 18, 0, 10);
                }
                p = strchr(p, '\n');
                if (!p)
                    break;
                p++;
            }

            if (logline) {
                char *eol = strchr(logline, '\n');

                logline += 17;

                if (eol) {
                    if (eol[-1] == '\r')
                        eol--;
                    http_log("%.*s\n", (int) (eol - logline), logline);
                    c->suppress_log = 1;
                }
            }

#ifdef DEBUG_WMP
            http_log("\nGot request:\n%s\n", c->buffer);
#endif

            if (client_id && extract_rates(ratebuf, sizeof(ratebuf), c->buffer)) {
                HTTPContext *wmpc;

                /* Now we have to find the client_id */
                for (wmpc = first_http_ctx; wmpc; wmpc = wmpc->next) {
                    if (wmpc->wmp_client_id == client_id)
                        break;
                }

                if (wmpc) {
                    if (modify_current_stream(wmpc, ratebuf)) {
                        wmpc->switch_pending = 1;
                    }
                }
            }

            snprintf(msg, sizeof(msg), "POST command not handled");
            c->stream = 0;
            goto send_error;
        }
        if (http_start_receive_data(c) < 0) {
            snprintf(msg, sizeof(msg), "could not open feed");
            goto send_error;
        }
        c->http_error = 0;
        c->state = HTTPSTATE_RECEIVE_DATA;
        return 0;
    }

#ifdef DEBUG_WMP
    if (strcmp(stream->filename + strlen(stream->filename) - 4, ".asf") == 0) {
        http_log("\nGot request:\n%s\n", c->buffer);
    }
#endif

    if (c->stream->stream_type == STREAM_TYPE_STATUS)
        goto send_stats;

    /* open input stream */
    if (open_input_stream(c, info) < 0) {
        snprintf(msg, sizeof(msg), "Input stream corresponding to '%s' not found", url);
        goto send_error;
    }

    /* prepare http header */
    q = c->buffer;
    q += snprintf(q, buf_end - q, "HTTP/1.0 200 OK\r\n");
    mime_type = c->stream->fmt->mime_type;
    if (!mime_type)
        mime_type = "application/x-octet_stream";
    q += snprintf(q, buf_end - q, "Pragma: no-cache\r\n");

    /* for asf, we need extra headers */
    if (!strcmp(c->stream->fmt->name,"asf_stream")) {
        /* Need to allocate a client id */

        c->wmp_client_id = random() & 0x7fffffff;

        q += snprintf(q, buf_end - q, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
    }
    q += snprintf(q, buf_end - q, "Content-Type: %s\r\n", mime_type);
    q += snprintf(q, buf_end - q, "\r\n");

    /* prepare output buffer */
    c->http_error = 0;
    c->buffer_ptr = c->buffer;
    c->buffer_end = q;
    c->state = HTTPSTATE_SEND_HEADER;
    return 0;
 send_error:
    c->http_error = 404;
    q = c->buffer;
    q += snprintf(q, buf_end - q, "HTTP/1.0 404 Not Found\r\n");
    q += snprintf(q, buf_end - q, "Content-type: %s\r\n", "text/html");
    q += snprintf(q, buf_end - q, "\r\n");
    q += snprintf(q, buf_end - q, "<HTML>\n");
    q += snprintf(q, buf_end - q, "<HEAD><TITLE>404 Not Found</TITLE></HEAD>\n");
    q += snprintf(q, buf_end - q, "<BODY>%s</BODY>\n", msg);
    q += snprintf(q, buf_end - q, "</HTML>\n");

    /* prepare output buffer */
    c->buffer_ptr = c->buffer;
    c->buffer_end = q;
    c->state = HTTPSTATE_SEND_HEADER;
    return 0;
 send_stats:
    compute_stats(c);
    c->http_error = 200; /* horrible : we use this value to avoid
                            going to the send data state */
    c->state = HTTPSTATE_SEND_HEADER;
    return 0;
}
/* Print `count` scaled down by factors of 1000 with a metric-style suffix
   (' ', k, M, G, T, P) so that at most six significant digits remain. */
static void fmt_bytecount(ByteIOContext *pb, int64_t count)
{
    static const char *suffix = " kMGTP";
    const char *s = suffix;

    while (count >= 100000 && s[1]) {
        count /= 1000;
        s++;
    }

    url_fprintf(pb, "%lld%c", count, *s);
}
/* Build the HTML status page for a STREAM_TYPE_STATUS request into a
   dynamic buffer and point the connection's output buffer at it.
   Fix: a stream obtained from popen() must be closed with pclose(), not
   fclose() — fclose() leaks the pipe's child process. */
static void compute_stats(HTTPContext *c)
{
    HTTPContext *c1;
    FFStream *stream;
    char *p;
    time_t ti;
    int i, len;
    ByteIOContext pb1, *pb = &pb1;

    if (url_open_dyn_buf(pb) < 0) {
        /* XXX: return an error ? */
        c->buffer_ptr = c->buffer;
        c->buffer_end = c->buffer;
        return;
    }

    url_fprintf(pb, "HTTP/1.0 200 OK\r\n");
    url_fprintf(pb, "Content-type: %s\r\n", "text/html");
    url_fprintf(pb, "Pragma: no-cache\r\n");
    url_fprintf(pb, "\r\n");

    url_fprintf(pb, "<HEAD><TITLE>FFServer Status</TITLE>\n");
    if (c->stream->feed_filename) {
        url_fprintf(pb, "<link rel=\"shortcut icon\" href=\"%s\">\n", c->stream->feed_filename);
    }
    url_fprintf(pb, "</HEAD>\n<BODY>");
    url_fprintf(pb, "<H1>FFServer Status</H1>\n");
    /* format status */
    url_fprintf(pb, "<H2>Available Streams</H2>\n");
    url_fprintf(pb, "<TABLE cellspacing=0 cellpadding=4>\n");
    url_fprintf(pb, "<TR><Th valign=top>Path<th align=left>Served<br>Conns<Th><br>bytes<Th valign=top>Format<Th>Bit rate<br>kbits/s<Th align=left>Video<br>kbits/s<th><br>Codec<Th align=left>Audio<br>kbits/s<th><br>Codec<Th align=left valign=top>Feed\n");
    stream = first_stream;
    while (stream != NULL) {
        char sfilename[1024];
        char *eosf;

        if (stream->feed != stream) {
            /* derive the link target: .asf -> .asx, .rm -> .ram, and for
               RTP streams a .rtsp (unicast) or .sdp (multicast) redirector */
            pstrcpy(sfilename, sizeof(sfilename) - 10, stream->filename);
            eosf = sfilename + strlen(sfilename);
            if (eosf - sfilename >= 4) {
                if (strcmp(eosf - 4, ".asf") == 0) {
                    strcpy(eosf - 4, ".asx");
                } else if (strcmp(eosf - 3, ".rm") == 0) {
                    strcpy(eosf - 3, ".ram");
                } else if (stream->fmt == &rtp_mux) {
                    /* generate a sample RTSP director if
                       unicast. Generate an SDP redirector if
                       multicast */
                    eosf = strrchr(sfilename, '.');
                    if (!eosf)
                        eosf = sfilename + strlen(sfilename);
                    if (stream->is_multicast)
                        strcpy(eosf, ".sdp");
                    else
                        strcpy(eosf, ".rtsp");
                }
            }

            url_fprintf(pb, "<TR><TD><A HREF=\"/%s\">%s</A> ",
                         sfilename, stream->filename);
            url_fprintf(pb, "<td align=right> %d <td align=right> ",
                        stream->conns_served);
            fmt_bytecount(pb, stream->bytes_served);
            switch(stream->stream_type) {
            case STREAM_TYPE_LIVE:
                {
                    int audio_bit_rate = 0;
                    int video_bit_rate = 0;
                    const char *audio_codec_name = "";
                    const char *video_codec_name = "";
                    const char *audio_codec_name_extra = "";
                    const char *video_codec_name_extra = "";

                    /* total the audio/video bitrates and remember the codec
                       names ("..." marks more than one codec of a kind) */
                    for(i=0;i<stream->nb_streams;i++) {
                        AVStream *st = stream->streams[i];
                        AVCodec *codec = avcodec_find_encoder(st->codec.codec_id);
                        switch(st->codec.codec_type) {
                        case CODEC_TYPE_AUDIO:
                            audio_bit_rate += st->codec.bit_rate;
                            if (codec) {
                                if (*audio_codec_name)
                                    audio_codec_name_extra = "...";
                                audio_codec_name = codec->name;
                            }
                            break;
                        case CODEC_TYPE_VIDEO:
                            video_bit_rate += st->codec.bit_rate;
                            if (codec) {
                                if (*video_codec_name)
                                    video_codec_name_extra = "...";
                                video_codec_name = codec->name;
                            }
                            break;
                        case CODEC_TYPE_DATA:
                            video_bit_rate += st->codec.bit_rate;
                            break;
                        default:
                            av_abort();
                        }
                    }
                    url_fprintf(pb, "<TD align=center> %s <TD align=right> %d <TD align=right> %d <TD> %s %s <TD align=right> %d <TD> %s %s",
                                 stream->fmt->name,
                                 stream->bandwidth,
                                 video_bit_rate / 1000, video_codec_name, video_codec_name_extra,
                                 audio_bit_rate / 1000, audio_codec_name, audio_codec_name_extra);
                    if (stream->feed) {
                        url_fprintf(pb, "<TD>%s", stream->feed->filename);
                    } else {
                        url_fprintf(pb, "<TD>%s", stream->feed_filename);
                    }
                    url_fprintf(pb, "\n");
                }
                break;
            default:
                url_fprintf(pb, "<TD align=center> - <TD align=right> - <TD align=right> - <td><td align=right> - <TD>\n");
                break;
            }
        }
        stream = stream->next;
    }
    url_fprintf(pb, "</TABLE>\n");

    stream = first_stream;
    while (stream != NULL) {
        if (stream->feed == stream) {
            url_fprintf(pb, "<h2>Feed %s</h2>", stream->filename);
            if (stream->pid) {
                url_fprintf(pb, "Running as pid %d.\n", stream->pid);

#if defined(linux) && !defined(CONFIG_NOCUTILS)
                {
                    FILE *pid_stat;
                    char ps_cmd[64];

                    /* This is somewhat linux specific I guess */
                    snprintf(ps_cmd, sizeof(ps_cmd),
                             "ps -o \"%%cpu,cputime\" --no-headers %d",
                             stream->pid);

                    pid_stat = popen(ps_cmd, "r");
                    if (pid_stat) {
                        char cpuperc[10];
                        char cpuused[64];

                        if (fscanf(pid_stat, "%10s %64s", cpuperc,
                                   cpuused) == 2) {
                            url_fprintf(pb, "Currently using %s%% of the cpu. Total time used %s.\n",
                                         cpuperc, cpuused);
                        }
                        /* popen() streams must be closed with pclose(),
                           not fclose() */
                        pclose(pid_stat);
                    }
                }
#endif

                url_fprintf(pb, "<p>");
            }
            url_fprintf(pb, "<table cellspacing=0 cellpadding=4><tr><th>Stream<th>type<th>kbits/s<th align=left>codec<th align=left>Parameters\n");

            for (i = 0; i < stream->nb_streams; i++) {
                AVStream *st = stream->streams[i];
                AVCodec *codec = avcodec_find_encoder(st->codec.codec_id);
                const char *type = "unknown";
                char parameters[64];

                parameters[0] = 0;

                switch(st->codec.codec_type) {
                case CODEC_TYPE_AUDIO:
                    type = "audio";
                    break;
                case CODEC_TYPE_VIDEO:
                    type = "video";
                    snprintf(parameters, sizeof(parameters), "%dx%d, q=%d-%d, fps=%d", st->codec.width, st->codec.height,
                                st->codec.qmin, st->codec.qmax, st->codec.frame_rate / st->codec.frame_rate_base);
                    break;
                default:
                    av_abort();
                }
                url_fprintf(pb, "<tr><td align=right>%d<td>%s<td align=right>%d<td>%s<td>%s\n",
                        i, type, st->codec.bit_rate/1000, codec ? codec->name : "", parameters);
            }
            url_fprintf(pb, "</table>\n");

        }
        stream = stream->next;
    }

#if 0
    {
        float avg;
        AVCodecContext *enc;
        char buf[1024];

        /* feed status */
        stream = first_feed;
        while (stream != NULL) {
            url_fprintf(pb, "<H1>Feed '%s'</H1>\n", stream->filename);
            url_fprintf(pb, "<TABLE>\n");
            url_fprintf(pb, "<TR><TD>Parameters<TD>Frame count<TD>Size<TD>Avg bitrate (kbits/s)\n");
            for(i=0;i<stream->nb_streams;i++) {
                AVStream *st = stream->streams[i];
                FeedData *fdata = st->priv_data;
                enc = &st->codec;

                avcodec_string(buf, sizeof(buf), enc);
                avg = fdata->avg_frame_size * (float)enc->rate * 8.0;
                if (enc->codec->type == CODEC_TYPE_AUDIO && enc->frame_size > 0)
                    avg /= enc->frame_size;
                url_fprintf(pb, "<TR><TD>%s <TD> %d <TD> %Ld <TD> %0.1f\n",
                             buf, enc->frame_number, fdata->data_count, avg / 1000.0);
            }
            url_fprintf(pb, "</TABLE>\n");
            stream = stream->next_feed;
        }
    }
#endif

    /* connection status */
    url_fprintf(pb, "<H2>Connection Status</H2>\n");

    url_fprintf(pb, "Number of connections: %d / %d<BR>\n",
                 nb_connections, nb_max_connections);

    url_fprintf(pb, "Bandwidth in use: %dk / %dk<BR>\n",
                 current_bandwidth, max_bandwidth);

    url_fprintf(pb, "<TABLE>\n");
    url_fprintf(pb, "<TR><th>#<th>File<th>IP<th>Proto<th>State<th>Target bits/sec<th>Actual bits/sec<th>Bytes transferred\n");
    c1 = first_http_ctx;
    i = 0;
    while (c1 != NULL) {
        int bitrate;
        int j;

        bitrate = 0;
        if (c1->stream) {
            for (j = 0; j < c1->stream->nb_streams; j++) {
                if (!c1->stream->feed) {
                    bitrate += c1->stream->streams[j]->codec.bit_rate;
                } else {
                    if (c1->feed_streams[j] >= 0) {
                        bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec.bit_rate;
                    }
                }
            }
        }

        i++;
        p = inet_ntoa(c1->from_addr.sin_addr);
        url_fprintf(pb, "<TR><TD><B>%d</B><TD>%s%s<TD>%s<TD>%s<TD>%s<td align=right>",
                    i,
                    c1->stream ? c1->stream->filename : "",
                    c1->state == HTTPSTATE_RECEIVE_DATA ? "(input)" : "",
                    p,
                    c1->protocol,
                    http_state[c1->state]);
        fmt_bytecount(pb, bitrate);
        url_fprintf(pb, "<td align=right>");
        fmt_bytecount(pb, compute_datarate(&c1->datarate, c1->data_count) * 8);
        url_fprintf(pb, "<td align=right>");
        fmt_bytecount(pb, c1->data_count);
        url_fprintf(pb, "\n");
        c1 = c1->next;
    }
    url_fprintf(pb, "</TABLE>\n");

    /* date */
    ti = time(NULL);
    p = ctime(&ti);
    url_fprintf(pb, "<HR size=1 noshade>Generated at %s", p);
    url_fprintf(pb, "</BODY>\n</HTML>\n");

    len = url_close_dyn_buf(pb, &c->pb_buffer);
    c->buffer_ptr = c->pb_buffer;
    c->buffer_end = c->pb_buffer + len;
}
/* check if the parser needs to be opened for stream i */
static void open_parser(AVFormatContext *s, int i)
{
    AVStream *st = s->streams[i];
    AVCodec *codec;

    if (st->codec.codec)
        return;                 /* a codec is already attached */

    codec = avcodec_find_decoder(st->codec.codec_id);
    if (!codec || !(codec->capabilities & CODEC_CAP_PARSE_ONLY))
        return;                 /* no parse-only capable decoder */

    /* open in parse-only mode; undo the flag if the open fails */
    st->codec.parse_only = 1;
    if (avcodec_open(&st->codec, codec) < 0) {
        st->codec.parse_only = 0;
    }
}
/* Open the input stream for connection 'c'.  For a live feed the FFM
 * feed file is opened and a start position (absolute time) is derived
 * from the "date"/"buffer" info tags; for a plain file a relative
 * start date may be given.  On success c->fmt_in is set, every stream
 * gets a parser, and the clock source stream is chosen (video
 * preferred).  Returns 0 on success, -1 on error.
 * Fix: use bounded pstrcpy() (as done elsewhere in this file) instead
 * of strcpy() so an over-long feed_filename cannot overflow
 * input_filename. */
static int open_input_stream(HTTPContext *c, const char *info)
{
    char buf[128];
    char input_filename[1024];
    AVFormatContext *s;
    int buf_size, i;
    int64_t stream_pos;
    /* find file name */
    if (c->stream->feed) {
        pstrcpy(input_filename, sizeof(input_filename),
                c->stream->feed->feed_filename);
        buf_size = FFM_PACKET_SIZE;
        /* compute position (absolute time) */
        if (find_info_tag(buf, sizeof(buf), "date", info)) {
            stream_pos = parse_date(buf, 0);
        } else if (find_info_tag(buf, sizeof(buf), "buffer", info)) {
            int prebuffer = strtol(buf, 0, 10);
            stream_pos = av_gettime() - prebuffer * (int64_t)1000000;
        } else {
            stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
        }
    } else {
        pstrcpy(input_filename, sizeof(input_filename),
                c->stream->feed_filename);
        buf_size = 0;
        /* compute position (relative time) */
        if (find_info_tag(buf, sizeof(buf), "date", info)) {
            stream_pos = parse_date(buf, 1);
        } else {
            stream_pos = 0;
        }
    }
    if (input_filename[0] == '\0')
        return -1;
#if 0
    /* stream_pos is currently only consumed by this debug trace and
       the disabled read_seek call below */
    { time_t when = stream_pos / 1000000;
    http_log("Stream pos = %lld, time=%s", stream_pos, ctime(&when));
    }
#endif
    /* open stream */
    if (av_open_input_file(&s, input_filename, c->stream->ifmt,
                           buf_size, c->stream->ap_in) < 0) {
        http_log("%s not found", input_filename);
        return -1;
    }
    c->fmt_in = s;
    /* open each parser */
    for(i=0;i<s->nb_streams;i++)
        open_parser(s, i);
    /* choose stream as clock source (we favorize video stream if
       present) for packet sending */
    c->pts_stream_index = 0;
    for(i=0;i<c->stream->nb_streams;i++) {
        if (c->pts_stream_index == 0 &&
            c->stream->streams[i]->codec.codec_type == CODEC_TYPE_VIDEO) {
            c->pts_stream_index = i;
        }
    }
#if 0
    if (c->fmt_in->iformat->read_seek) {
        c->fmt_in->iformat->read_seek(c->fmt_in, stream_pos);
    }
#endif
    /* set the start time (needed for maxtime and RTP packet timing) */
    c->start_time = cur_time;
    c->first_pts = AV_NOPTS_VALUE;
    return 0;
}
/* return the server clock (in us) */
static int64_t get_server_clock(HTTPContext *c)
{
    /* compute current pts value from system time */
    /* cur_time and start_time are in ms, hence the *1000 to microseconds */
    return (int64_t)(cur_time - c->start_time) * 1000LL;
}
/* Return the estimated time (in us) at which the current packet must
   be sent, interpolating linearly across the bytes of the current
   frame that have already left the buffer. */
static int64_t get_packet_send_clock(HTTPContext *c)
{
    int remaining, sent, total;

    total = c->cur_frame_bytes;
    if (total <= 0)
        return c->cur_pts;      /* no frame in flight: send at its pts */

    remaining = c->buffer_end - c->buffer_ptr;
    sent = total - remaining;
    return c->cur_pts + (c->cur_frame_duration * sent) / total;
}
/* Prepare the next chunk of output for connection 'c' into
 * c->pb_buffer, driven by c->state:
 *   HTTPSTATE_SEND_DATA_HEADER  - build the container header,
 *   HTTPSTATE_SEND_DATA         - read and remux one packet,
 *   HTTPSTATE_SEND_DATA_TRAILER - build the container trailer.
 * Returns 0 on success (buffer_ptr/buffer_end updated), 1 when the
 * state changed and the caller must come back later (waiting on a
 * feed), -1 on error or when nothing more will ever be sent. */
static int http_prepare_data(HTTPContext *c)
{
    int i, len, ret;
    AVFormatContext *ctx;
    av_freep(&c->pb_buffer);
    switch(c->state) {
    case HTTPSTATE_SEND_DATA_HEADER:
        memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx));
        /* copy the stream metadata into the per-connection muxer context */
        pstrcpy(c->fmt_ctx.author, sizeof(c->fmt_ctx.author),
                c->stream->author);
        pstrcpy(c->fmt_ctx.comment, sizeof(c->fmt_ctx.comment),
                c->stream->comment);
        pstrcpy(c->fmt_ctx.copyright, sizeof(c->fmt_ctx.copyright),
                c->stream->copyright);
        pstrcpy(c->fmt_ctx.title, sizeof(c->fmt_ctx.title),
                c->stream->title);
        /* open output stream by using specified codecs */
        c->fmt_ctx.oformat = c->stream->fmt;
        c->fmt_ctx.nb_streams = c->stream->nb_streams;
        for(i=0;i<c->fmt_ctx.nb_streams;i++) {
            AVStream *st;
            AVStream *src;
            st = av_mallocz(sizeof(AVStream));
            c->fmt_ctx.streams[i] = st;
            /* if file or feed, then just take streams from FFStream struct */
            if (!c->stream->feed ||
                c->stream->feed == c->stream)
                src = c->stream->streams[i];
            else
                src = c->stream->feed->streams[c->stream->feed_streams[i]];
            *st = *src;
            st->priv_data = 0;
            st->codec.frame_number = 0; /* XXX: should be done in
                                           AVStream, not in codec */
            /* I'm pretty sure that this is not correct...
             * However, without it, we crash
             */
            st->codec.coded_frame = &dummy_frame;
        }
        c->got_key_frame = 0;
        /* prepare header and save header data in a stream */
        if (url_open_dyn_buf(&c->fmt_ctx.pb) < 0) {
            /* XXX: potential leak */
            return -1;
        }
        c->fmt_ctx.pb.is_streamed = 1;
        av_set_parameters(&c->fmt_ctx, NULL);
        av_write_header(&c->fmt_ctx);
        len = url_close_dyn_buf(&c->fmt_ctx.pb, &c->pb_buffer);
        c->buffer_ptr = c->pb_buffer;
        c->buffer_end = c->pb_buffer + len;
        c->state = HTTPSTATE_SEND_DATA;
        c->last_packet_sent = 0;
        break;
    case HTTPSTATE_SEND_DATA:
        /* find a new packet */
        {
            AVPacket pkt;
            /* read a packet from the input stream */
            if (c->stream->feed) {
                /* let the demuxer know how far the feed writer has got */
                ffm_set_write_index(c->fmt_in,
                                    c->stream->feed->feed_write_index,
                                    c->stream->feed->feed_size);
            }
            if (c->stream->max_time &&
                c->stream->max_time + c->start_time - cur_time < 0) {
                /* We have timed out */
                c->state = HTTPSTATE_SEND_DATA_TRAILER;
            } else {
            redo:
                if (av_read_frame(c->fmt_in, &pkt) < 0) {
                    if (c->stream->feed && c->stream->feed->feed_opened) {
                        /* if coming from feed, it means we reached the end of the
                           ffm file, so must wait for more data */
                        c->state = HTTPSTATE_WAIT_FEED;
                        return 1; /* state changed */
                    } else {
                        if (c->stream->loop) {
                            /* looping file: reopen from the start */
                            av_close_input_file(c->fmt_in);
                            c->fmt_in = NULL;
                            if (open_input_stream(c, "") < 0)
                                goto no_loop;
                            goto redo;
                        } else {
                        no_loop:
                            /* must send trailer now because eof or error */
                            c->state = HTTPSTATE_SEND_DATA_TRAILER;
                        }
                    }
                } else {
                    /* update first pts if needed */
                    if (c->first_pts == AV_NOPTS_VALUE) {
                        c->first_pts = pkt.dts;
                        c->start_time = cur_time;
                    }
                    /* send it to the appropriate stream */
                    if (c->stream->feed) {
                        /* if coming from a feed, select the right stream */
                        if (c->switch_pending) {
                            c->switch_pending = 0;
                            for(i=0;i<c->stream->nb_streams;i++) {
                                if (c->switch_feed_streams[i] == pkt.stream_index) {
                                    if (pkt.flags & PKT_FLAG_KEY) {
                                        do_switch_stream(c, i);
                                    }
                                }
                                if (c->switch_feed_streams[i] >= 0) {
                                    /* still a stream waiting for its key frame */
                                    c->switch_pending = 1;
                                }
                            }
                        }
                        for(i=0;i<c->stream->nb_streams;i++) {
                            if (c->feed_streams[i] == pkt.stream_index) {
                                pkt.stream_index = i;
                                if (pkt.flags & PKT_FLAG_KEY) {
                                    c->got_key_frame |= 1 << i;
                                }
                                /* See if we have all the key frames, then
                                 * we start to send. This logic is not quite
                                 * right, but it works for the case of a
                                 * single video stream with one or more
                                 * audio streams (for which every frame is
                                 * typically a key frame).
                                 */
                                if (!c->stream->send_on_key ||
                                    ((c->got_key_frame + 1) >> c->stream->nb_streams)) {
                                    goto send_it;
                                }
                            }
                        }
                    } else {
                        AVCodecContext *codec;
                    send_it:
                        /* specific handling for RTP: we use several
                           output stream (one for each RTP
                           connection). XXX: need more abstract handling */
                        if (c->is_packetized) {
                            AVStream *st;
                            /* compute send time and duration */
                            st = c->fmt_in->streams[pkt.stream_index];
                            c->cur_pts = pkt.dts;
                            if (st->start_time != AV_NOPTS_VALUE)
                                c->cur_pts -= st->start_time;
                            c->cur_frame_duration = pkt.duration;
#if 0
                            printf("index=%d pts=%0.3f duration=%0.6f\n",
                                   pkt.stream_index,
                                   (double)c->cur_pts /
                                   AV_TIME_BASE,
                                   (double)c->cur_frame_duration /
                                   AV_TIME_BASE);
#endif
                            /* find RTP context */
                            c->packet_stream_index = pkt.stream_index;
                            ctx = c->rtp_ctx[c->packet_stream_index];
                            if(!ctx) {
                                /* stream not SETUP by the client yet: drop */
                                av_free_packet(&pkt);
                                break;
                            }
                            codec = &ctx->streams[0]->codec;
                            /* only one stream per RTP connection */
                            pkt.stream_index = 0;
                        } else {
                            ctx = &c->fmt_ctx;
                            /* Fudge here */
                            codec = &ctx->streams[pkt.stream_index]->codec;
                        }
                        codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
                        if (c->is_packetized) {
                            int max_packet_size;
                            if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP)
                                max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
                            else
                                max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
                            ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size);
                        } else {
                            ret = url_open_dyn_buf(&ctx->pb);
                        }
                        if (ret < 0) {
                            /* XXX: potential leak */
                            return -1;
                        }
                        if (av_write_frame(ctx, &pkt)) {
                            c->state = HTTPSTATE_SEND_DATA_TRAILER;
                        }
                        len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
                        c->cur_frame_bytes = len;
                        c->buffer_ptr = c->pb_buffer;
                        c->buffer_end = c->pb_buffer + len;
                        codec->frame_number++;
                        if (len == 0)
                            goto redo;
                    }
                    av_free_packet(&pkt);
                }
            }
        }
        break;
    default:
    case HTTPSTATE_SEND_DATA_TRAILER:
        /* last packet test ? */
        if (c->last_packet_sent || c->is_packetized)
            return -1;
        ctx = &c->fmt_ctx;
        /* prepare header */
        if (url_open_dyn_buf(&ctx->pb) < 0) {
            /* XXX: potential leak */
            return -1;
        }
        av_write_trailer(ctx);
        len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
        c->buffer_ptr = c->pb_buffer;
        c->buffer_end = c->pb_buffer + len;
        c->last_packet_sent = 1;
        break;
    }
    return 0;
}
/* in bit/s */
#define SHORT_TERM_BANDWIDTH 8000000
/* should convert the format at the same time */
/* send data starting at c->buffer_ptr to the output connection
(either UDP or TCP connection) */
/* Send data starting at c->buffer_ptr to the output connection
 * (either UDP or TCP connection).  Refills the buffer via
 * http_prepare_data() when it runs empty.  For packetized (RTP)
 * output each packet in the buffer is preceded by a 4-byte big-endian
 * length written by url_open_dyn_packet_buf().  Returns 0 to keep the
 * connection alive, -1 to close it. */
static int http_send_data(HTTPContext *c)
{
    int len, ret;
    for(;;) {
        if (c->buffer_ptr >= c->buffer_end) {
            ret = http_prepare_data(c);
            if (ret < 0)
                return -1;
            else if (ret != 0) {
                /* state change requested */
                break;
            }
        } else {
            if (c->is_packetized) {
                /* RTP data output */
                len = c->buffer_end - c->buffer_ptr;
                if (len < 4) {
                    /* fail safe - should never happen */
                fail1:
                    c->buffer_ptr = c->buffer_end;
                    return 0;
                }
                /* decode the 4-byte big-endian packet length prefix */
                len = (c->buffer_ptr[0] << 24) |
                    (c->buffer_ptr[1] << 16) |
                    (c->buffer_ptr[2] << 8) |
                    (c->buffer_ptr[3]);
                if (len > (c->buffer_end - c->buffer_ptr))
                    goto fail1;
                if ((get_packet_send_clock(c) - get_server_clock(c)) > 0) {
                    /* nothing to send yet: we can wait */
                    return 0;
                }
                c->data_count += len;
                update_datarate(&c->datarate, c->data_count);
                if (c->stream)
                    c->stream->bytes_served += len;
                if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) {
                    /* RTP packets are sent inside the RTSP TCP connection */
                    ByteIOContext pb1, *pb = &pb1;
                    int interleaved_index, size;
                    uint8_t header[4];
                    HTTPContext *rtsp_c;
                    rtsp_c = c->rtsp_c;
                    /* if no RTSP connection left, error */
                    if (!rtsp_c)
                        return -1;
                    /* if already sending something, then wait. */
                    if (rtsp_c->state != RTSPSTATE_WAIT_REQUEST) {
                        break;
                    }
                    if (url_open_dyn_buf(pb) < 0)
                        goto fail1;
                    interleaved_index = c->packet_stream_index * 2;
                    /* RTCP packets are sent at odd indexes */
                    /* payload type 200 = RTCP Sender Report */
                    if (c->buffer_ptr[1] == 200)
                        interleaved_index++;
                    /* write RTSP TCP header ('$' framing, RFC 2326 10.12) */
                    header[0] = '$';
                    header[1] = interleaved_index;
                    header[2] = len >> 8;
                    header[3] = len;
                    put_buffer(pb, header, 4);
                    /* write RTP packet data */
                    c->buffer_ptr += 4;
                    put_buffer(pb, c->buffer_ptr, len);
                    size = url_close_dyn_buf(pb, &c->packet_buffer);
                    /* prepare asynchronous TCP sending */
                    rtsp_c->packet_buffer_ptr = c->packet_buffer;
                    rtsp_c->packet_buffer_end = c->packet_buffer + size;
                    c->buffer_ptr += len;
                    /* send everything we can NOW */
                    len = write(rtsp_c->fd, rtsp_c->packet_buffer_ptr,
                                rtsp_c->packet_buffer_end - rtsp_c->packet_buffer_ptr);
                    if (len > 0) {
                        rtsp_c->packet_buffer_ptr += len;
                    }
                    if (rtsp_c->packet_buffer_ptr < rtsp_c->packet_buffer_end) {
                        /* if we could not send all the data, we will
                           send it later, so a new state is needed to
                           "lock" the RTSP TCP connection */
                        rtsp_c->state = RTSPSTATE_SEND_PACKET;
                        break;
                    } else {
                        /* all data has been sent */
                        av_freep(&c->packet_buffer);
                    }
                } else {
                    /* send RTP packet directly in UDP */
                    c->buffer_ptr += 4;
                    url_write(c->rtp_handles[c->packet_stream_index],
                              c->buffer_ptr, len);
                    c->buffer_ptr += len;
                    /* here we continue as we can send several packets per 10 ms slot */
                }
            } else {
                /* TCP data output */
                len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
                if (len < 0) {
                    if (errno != EAGAIN && errno != EINTR) {
                        /* error : close connection */
                        return -1;
                    } else {
                        /* would block: try again on the next poll cycle */
                        return 0;
                    }
                } else {
                    c->buffer_ptr += len;
                }
                c->data_count += len;
                update_datarate(&c->datarate, c->data_count);
                if (c->stream)
                    c->stream->bytes_served += len;
                break;
            }
        }
    } /* for(;;) */
    return 0;
}
/* Begin receiving feed data on connection 'c': open the feed file for
   read/write, restore the persisted write index, and prime the input
   buffer for the first FFM packet.  Returns 0 on success, -1 if the
   feed is busy, read-only, or cannot be opened. */
static int http_start_receive_data(HTTPContext *c)
{
    int feed_fd;

    /* refuse a second writer, and refuse writes to a read-only feed */
    if (c->stream->feed_opened || c->stream->readonly)
        return -1;

    feed_fd = open(c->stream->feed_filename, O_RDWR);
    if (feed_fd < 0)
        return -1;
    c->feed_fd = feed_fd;

    /* resume where the previous writer stopped */
    c->stream->feed_write_index = ffm_read_write_index(feed_fd);
    c->stream->feed_size = lseek(feed_fd, 0, SEEK_END);
    lseek(feed_fd, 0, SEEK_SET);

    /* init buffer input */
    c->buffer_ptr = c->buffer;
    c->buffer_end = c->buffer + FFM_PACKET_SIZE;
    c->stream->feed_opened = 1;
    return 0;
}
/* Read feed data posted by a client encoder into c->buffer.  Once a
 * full FFM packet has been accumulated it is appended to the feed
 * file (wrapping at feed_max_size) and any connections waiting on
 * this feed are woken up.  The very first FFM_PACKET_SIZE bytes are
 * the feed header: they are parsed to refresh the feed's codec
 * parameters instead of being stored.  Returns 0 to keep the
 * connection, -1 to close it. */
static int http_receive_data(HTTPContext *c)
{
    HTTPContext *c1;
    if (c->buffer_end > c->buffer_ptr) {
        int len;
        len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
        if (len < 0) {
            if (errno != EAGAIN && errno != EINTR) {
                /* error : close connection */
                goto fail;
            }
        } else if (len == 0) {
            /* end of connection : close it */
            goto fail;
        } else {
            c->buffer_ptr += len;
            c->data_count += len;
            update_datarate(&c->datarate, c->data_count);
        }
    }
    /* after the header, every FFM packet must start with the "fm" magic */
    if (c->buffer_ptr - c->buffer >= 2 && c->data_count > FFM_PACKET_SIZE) {
        if (c->buffer[0] != 'f' ||
            c->buffer[1] != 'm') {
            http_log("Feed stream has become desynchronized -- disconnecting\n");
            goto fail;
        }
    }
    if (c->buffer_ptr >= c->buffer_end) {
        FFStream *feed = c->stream;
        /* a packet has been received : write it in the store, except
           if header */
        if (c->data_count > FFM_PACKET_SIZE) {
            /* printf("writing pos=0x%Lx size=0x%Lx\n", feed->feed_write_index, feed->feed_size); */
            /* XXX: use llseek or url_seek */
            lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
            write(c->feed_fd, c->buffer, FFM_PACKET_SIZE);
            feed->feed_write_index += FFM_PACKET_SIZE;
            /* update file size */
            if (feed->feed_write_index > c->stream->feed_size)
                feed->feed_size = feed->feed_write_index;
            /* handle wrap around if max file size reached */
            if (feed->feed_write_index >= c->stream->feed_max_size)
                feed->feed_write_index = FFM_PACKET_SIZE;
            /* write index */
            ffm_write_write_index(c->feed_fd, feed->feed_write_index);
            /* wake up any waiting connections */
            for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
                if (c1->state == HTTPSTATE_WAIT_FEED &&
                    c1->stream->feed == c->stream->feed) {
                    c1->state = HTTPSTATE_SEND_DATA;
                }
            }
        } else {
            /* We have a header in our hands that contains useful data */
            AVFormatContext s;
            AVInputFormat *fmt_in;
            ByteIOContext *pb = &s.pb;
            int i;
            memset(&s, 0, sizeof(s));
            url_open_buf(pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY);
            pb->buf_end = c->buffer_end; /* ?? */
            pb->is_streamed = 1;
            /* use feed output format name to find corresponding input format */
            fmt_in = av_find_input_format(feed->fmt->name);
            if (!fmt_in)
                goto fail;
            if (fmt_in->priv_data_size > 0) {
                s.priv_data = av_mallocz(fmt_in->priv_data_size);
                if (!s.priv_data)
                    goto fail;
            } else
                s.priv_data = NULL;
            if (fmt_in->read_header(&s, 0) < 0) {
                av_freep(&s.priv_data);
                goto fail;
            }
            /* Now we have the actual streams */
            if (s.nb_streams != feed->nb_streams) {
                av_freep(&s.priv_data);
                goto fail;
            }
            /* copy the freshly parsed codec parameters into the feed */
            for (i = 0; i < s.nb_streams; i++) {
                memcpy(&feed->streams[i]->codec,
                       &s.streams[i]->codec, sizeof(AVCodecContext));
            }
            av_freep(&s.priv_data);
        }
        c->buffer_ptr = c->buffer;
    }
    return 0;
 fail:
    c->stream->feed_opened = 0;
    close(c->feed_fd);
    return -1;
}
/********************************************************************/
/* RTSP handling */
/* Emit the RTSP status line, CSeq echo and Date header for a reply
   with the given status code.  The human-readable reason phrase is
   looked up from the table generated by rtspcodes.h. */
static void rtsp_reply_header(HTTPContext *c, enum RTSPStatusCode error_number)
{
    const char *reason;
    time_t now;
    char *nl;
    char datebuf[32];

    switch(error_number) {
#define DEF(n, c, s) case c: reason = s; break;
#include "rtspcodes.h"
#undef DEF
    default:
        reason = "Unknown Error";
        break;
    }

    url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", error_number, reason);
    url_fprintf(c->pb, "CSeq: %d\r\n", c->seq);

    /* output GMT time; ctime() appends a '\n' which must be stripped */
    now = time(NULL);
    strcpy(datebuf, ctime(&now));
    nl = datebuf + strlen(datebuf) - 1;
    if (*nl == '\n')
        *nl = '\0';
    url_fprintf(c->pb, "Date: %s GMT\r\n", datebuf);
}
/* Send a complete (headers-only) RTSP error reply with the given
   status code. */
static void rtsp_reply_error(HTTPContext *c, enum RTSPStatusCode error_number)
{
    rtsp_reply_header(c, error_number);
    url_fprintf(c->pb, "\r\n");
}
/* Parse one complete RTSP request held in c->buffer and dispatch it
 * to the matching rtsp_cmd_* handler.  The reply is accumulated in a
 * dynamic buffer and handed over to c->pb_buffer for asynchronous
 * sending.  Returns 0 on success (state moves to
 * RTSPSTATE_SEND_REPLY), -1 on an unrecoverable buffer error. */
static int rtsp_parse_request(HTTPContext *c)
{
    const char *p, *p1, *p2;
    char cmd[32];
    char url[1024];
    char protocol[32];
    char line[1024];
    ByteIOContext pb1;
    int len;
    RTSPHeader header1, *header = &header1;
    c->buffer_ptr[0] = '\0';
    p = c->buffer;
    /* request line: <method> <url> <protocol> */
    get_word(cmd, sizeof(cmd), &p);
    get_word(url, sizeof(url), &p);
    get_word(protocol, sizeof(protocol), &p);
    pstrcpy(c->method, sizeof(c->method), cmd);
    pstrcpy(c->url, sizeof(c->url), url);
    pstrcpy(c->protocol, sizeof(c->protocol), protocol);
    c->pb = &pb1;
    if (url_open_dyn_buf(c->pb) < 0) {
        /* XXX: cannot do more */
        c->pb = NULL; /* safety */
        return -1;
    }
    /* check version name */
    if (strcmp(protocol, "RTSP/1.0") != 0) {
        rtsp_reply_error(c, RTSP_STATUS_VERSION);
        goto the_end;
    }
    /* parse each header line */
    memset(header, 0, sizeof(RTSPHeader));
    /* skip to next line */
    while (*p != '\n' && *p != '\0')
        p++;
    if (*p == '\n')
        p++;
    while (*p != '\0') {
        p1 = strchr(p, '\n');
        if (!p1)
            break;
        p2 = p1;
        /* trim a trailing '\r' from the header line */
        if (p2 > p && p2[-1] == '\r')
            p2--;
        /* skip empty line */
        if (p2 == p)
            break;
        len = p2 - p;
        /* over-long header lines are truncated, not rejected */
        if (len > sizeof(line) - 1)
            len = sizeof(line) - 1;
        memcpy(line, p, len);
        line[len] = '\0';
        rtsp_parse_line(header, line);
        p = p1 + 1;
    }
    /* handle sequence number */
    c->seq = header->seq;
    if (!strcmp(cmd, "DESCRIBE")) {
        rtsp_cmd_describe(c, url);
    } else if (!strcmp(cmd, "OPTIONS")) {
        rtsp_cmd_options(c, url);
    } else if (!strcmp(cmd, "SETUP")) {
        rtsp_cmd_setup(c, url, header);
    } else if (!strcmp(cmd, "PLAY")) {
        rtsp_cmd_play(c, url, header);
    } else if (!strcmp(cmd, "PAUSE")) {
        rtsp_cmd_pause(c, url, header);
    } else if (!strcmp(cmd, "TEARDOWN")) {
        rtsp_cmd_teardown(c, url, header);
    } else {
        rtsp_reply_error(c, RTSP_STATUS_METHOD);
    }
 the_end:
    len = url_close_dyn_buf(c->pb, &c->pb_buffer);
    c->pb = NULL; /* safety */
    if (len < 0) {
        /* XXX: cannot do more */
        return -1;
    }
    c->buffer_ptr = c->pb_buffer;
    c->buffer_end = c->pb_buffer + len;
    c->state = RTSPSTATE_SEND_REPLY;
    return 0;
}
/* XXX: move that to rtsp.c, but would need to replace FFStream by
AVFormatContext */
/* Build the SDP (session description) text for 'stream' into a newly
 * allocated buffer stored in *pbuffer, announcing one media section
 * per elementary stream.  'my_ip' is the server address placed in the
 * origin line.  Returns the buffer length on success (caller owns and
 * must free *pbuffer), -1 on failure. */
static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
                                   struct in_addr my_ip)
{
    ByteIOContext pb1, *pb = &pb1;
    int i, payload_type, port, private_payload_type, j;
    const char *ipstr, *title, *mediatype;
    AVStream *st;
    if (url_open_dyn_buf(pb) < 0)
        return -1;
    /* general media info */
    url_fprintf(pb, "v=0\n");
    ipstr = inet_ntoa(my_ip);
    url_fprintf(pb, "o=- 0 0 IN IP4 %s\n", ipstr);
    title = stream->title;
    if (title[0] == '\0')
        title = "No Title";
    url_fprintf(pb, "s=%s\n", title);
    if (stream->comment[0] != '\0')
        url_fprintf(pb, "i=%s\n", stream->comment);
    if (stream->is_multicast) {
        url_fprintf(pb, "c=IN IP4 %s\n", inet_ntoa(stream->multicast_ip));
    }
    /* for each stream, we output the necessary info */
    private_payload_type = RTP_PT_PRIVATE;
    for(i = 0; i < stream->nb_streams; i++) {
        st = stream->streams[i];
        if (st->codec.codec_id == CODEC_ID_MPEG2TS) {
            /* a transport stream is announced as a single video media */
            mediatype = "video";
        } else {
            switch(st->codec.codec_type) {
            case CODEC_TYPE_AUDIO:
                mediatype = "audio";
                break;
            case CODEC_TYPE_VIDEO:
                mediatype = "video";
                break;
            default:
                mediatype = "application";
                break;
            }
        }
        /* NOTE: the port indication is not correct in case of
           unicast. It is not an issue because RTSP gives it */
        payload_type = rtp_get_payload_type(&st->codec);
        if (payload_type < 0)
            /* no static payload type: hand out a dynamic one */
            payload_type = private_payload_type++;
        if (stream->is_multicast) {
            port = stream->multicast_port + 2 * i;
        } else {
            port = 0;
        }
        url_fprintf(pb, "m=%s %d RTP/AVP %d\n",
                    mediatype, port, payload_type);
        if (payload_type >= RTP_PT_PRIVATE) {
            /* for private payload type, we need to give more info */
            switch(st->codec.codec_id) {
            case CODEC_ID_MPEG4:
                {
                    uint8_t *data;
                    url_fprintf(pb, "a=rtpmap:%d MP4V-ES/%d\n",
                                payload_type, 90000);
                    /* we must also add the mpeg4 header */
                    data = st->codec.extradata;
                    if (data) {
                        url_fprintf(pb, "a=fmtp:%d config=", payload_type);
                        for(j=0;j<st->codec.extradata_size;j++) {
                            url_fprintf(pb, "%02x", data[j]);
                        }
                        url_fprintf(pb, "\n");
                    }
                }
                break;
            default:
                /* XXX: add other codecs ? */
                goto fail;
            }
        }
        url_fprintf(pb, "a=control:streamid=%d\n", i);
    }
    return url_close_dyn_buf(pb, pbuffer);
 fail:
    url_close_dyn_buf(pb, pbuffer);
    av_free(*pbuffer);
    return -1;
}
/* Handle an RTSP OPTIONS request: reply 200 OK with the list of
   methods this server supports.  The reply is written directly
   instead of via rtsp_reply_header() (see commented call). */
static void rtsp_cmd_options(HTTPContext *c, const char *url)
{
    /* rtsp_reply_header(c, RTSP_STATUS_OK); */
    url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", RTSP_STATUS_OK, "OK");
    url_fprintf(c->pb, "CSeq: %d\r\n", c->seq);
    url_fprintf(c->pb, "Public: %s\r\n", "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE");
    url_fprintf(c->pb, "\r\n");
}
/* Handle an RTSP DESCRIBE request: find the RTP stream matching the
 * URL path and reply with its SDP description.
 * Fix: the SDP buffer allocated by prepare_sdp_description() was
 * never released, leaking memory on every DESCRIBE request; it is now
 * freed once its contents have been copied into the reply. */
static void rtsp_cmd_describe(HTTPContext *c, const char *url)
{
    FFStream *stream;
    char path1[1024];
    const char *path;
    uint8_t *content;
    int content_length, len;
    struct sockaddr_in my_addr;
    /* find which url is asked */
    url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (*path == '/')
        path++;
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (!stream->is_feed && stream->fmt == &rtp_mux &&
            !strcmp(path, stream->filename)) {
            goto found;
        }
    }
    /* no stream found */
    rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */
    return;
 found:
    /* prepare the media description in sdp format */
    /* get the host IP */
    len = sizeof(my_addr);
    getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
    content_length = prepare_sdp_description(stream, &content, my_addr.sin_addr);
    if (content_length < 0) {
        rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
        return;
    }
    rtsp_reply_header(c, RTSP_STATUS_OK);
    url_fprintf(c->pb, "Content-Type: application/sdp\r\n");
    url_fprintf(c->pb, "Content-Length: %d\r\n", content_length);
    url_fprintf(c->pb, "\r\n");
    put_buffer(c->pb, content, content_length);
    /* we own the SDP buffer once prepare_sdp_description() succeeds;
       release it now that it has been copied into the reply */
    av_free(content);
}
/* Look up an RTP connection by its session ID.  Returns NULL when
   the ID is empty or no connection matches. */
static HTTPContext *find_rtp_session(const char *session_id)
{
    HTTPContext *ctx;

    if (!session_id[0])
        return NULL;
    for (ctx = first_http_ctx; ctx; ctx = ctx->next) {
        if (strcmp(ctx->session_id, session_id) == 0)
            return ctx;
    }
    return NULL;
}
/* Return the first transport description in 'h' using the requested
   lower-level protocol, or NULL if the client offered none. */
static RTSPTransportField *find_transport(RTSPHeader *h, enum RTSPProtocol protocol)
{
    int i;

    for (i = 0; i < h->nb_transports; i++) {
        RTSPTransportField *field = &h->transports[i];
        if (field->protocol == protocol)
            return field;
    }
    return NULL;
}
/* Handle an RTSP SETUP request: resolve the URL to a stream (either
 * the aggregate "filename" form for single-stream files, or
 * "filename/streamid=N"), find or create the RTP session for the
 * client's session ID, validate the offered transport, and open the
 * RTP output for the selected elementary stream.  Replies with the
 * negotiated transport parameters on success. */
static void rtsp_cmd_setup(HTTPContext *c, const char *url,
                           RTSPHeader *h)
{
    FFStream *stream;
    int stream_index, port;
    char buf[1024];
    char path1[1024];
    const char *path;
    HTTPContext *rtp_c;
    RTSPTransportField *th;
    struct sockaddr_in dest_addr;
    RTSPActionServerSetup setup;
    /* find which url is asked */
    url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (*path == '/')
        path++;
    /* now check each stream */
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (!stream->is_feed && stream->fmt == &rtp_mux) {
            /* accept aggregate filenames only if single stream */
            if (!strcmp(path, stream->filename)) {
                if (stream->nb_streams != 1) {
                    rtsp_reply_error(c, RTSP_STATUS_AGGREGATE);
                    return;
                }
                stream_index = 0;
                goto found;
            }
            for(stream_index = 0; stream_index < stream->nb_streams;
                stream_index++) {
                snprintf(buf, sizeof(buf), "%s/streamid=%d",
                         stream->filename, stream_index);
                if (!strcmp(path, buf))
                    goto found;
            }
        }
    }
    /* no stream found */
    rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */
    return;
 found:
    /* generate session id if needed */
    if (h->session_id[0] == '\0') {
        snprintf(h->session_id, sizeof(h->session_id),
                 "%08x%08x", (int)random(), (int)random());
    }
    /* find rtp session, and create it if none found */
    rtp_c = find_rtp_session(h->session_id);
    if (!rtp_c) {
        /* always prefer UDP */
        th = find_transport(h, RTSP_PROTOCOL_RTP_UDP);
        if (!th) {
            th = find_transport(h, RTSP_PROTOCOL_RTP_TCP);
            if (!th) {
                rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
                return;
            }
        }
        rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id,
                                   th->protocol);
        if (!rtp_c) {
            rtsp_reply_error(c, RTSP_STATUS_BANDWIDTH);
            return;
        }
        /* open input stream */
        if (open_input_stream(rtp_c, "") < 0) {
            rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
            return;
        }
    }
    /* test if stream is OK (test needed because several SETUP needs
       to be done for a given file) */
    if (rtp_c->stream != stream) {
        rtsp_reply_error(c, RTSP_STATUS_SERVICE);
        return;
    }
    /* test if stream is already set up */
    if (rtp_c->rtp_ctx[stream_index]) {
        rtsp_reply_error(c, RTSP_STATUS_STATE);
        return;
    }
    /* check transport */
    th = find_transport(h, rtp_c->rtp_protocol);
    if (!th || (th->protocol == RTSP_PROTOCOL_RTP_UDP &&
                th->client_port_min <= 0)) {
        rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
        return;
    }
    /* setup default options */
    setup.transport_option[0] = '\0';
    dest_addr = rtp_c->from_addr;
    dest_addr.sin_port = htons(th->client_port_min);
    /* add transport option if needed */
    if (ff_rtsp_callback) {
        /* the external callback may rewrite the destination address */
        setup.ipaddr = ntohl(dest_addr.sin_addr.s_addr);
        if (ff_rtsp_callback(RTSP_ACTION_SERVER_SETUP, rtp_c->session_id,
                             (char *)&setup, sizeof(setup),
                             stream->rtsp_option) < 0) {
            rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
            return;
        }
        dest_addr.sin_addr.s_addr = htonl(setup.ipaddr);
    }
    /* setup stream */
    if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, c) < 0) {
        rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
        return;
    }
    /* now everything is OK, so we can send the connection parameters */
    rtsp_reply_header(c, RTSP_STATUS_OK);
    /* session ID */
    url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
    switch(rtp_c->rtp_protocol) {
    case RTSP_PROTOCOL_RTP_UDP:
        port = rtp_get_local_port(rtp_c->rtp_handles[stream_index]);
        url_fprintf(c->pb, "Transport: RTP/AVP/UDP;unicast;"
                    "client_port=%d-%d;server_port=%d-%d",
                    th->client_port_min, th->client_port_min + 1,
                    port, port + 1);
        break;
    case RTSP_PROTOCOL_RTP_TCP:
        url_fprintf(c->pb, "Transport: RTP/AVP/TCP;interleaved=%d-%d",
                    stream_index * 2, stream_index * 2 + 1);
        break;
    default:
        break;
    }
    if (setup.transport_option[0] != '\0') {
        url_fprintf(c->pb, ";%s", setup.transport_option);
    }
    url_fprintf(c->pb, "\r\n");
    url_fprintf(c->pb, "\r\n");
}
/* Find an RTP connection by session ID and verify that the request
   URL refers to the same stream — either the aggregate filename or
   any of its "filename/streamid=N" sub-URLs. */
static HTTPContext *find_rtp_session_with_url(const char *url,
                                              const char *session_id)
{
    HTTPContext *rtp_c;
    char path1[1024];
    const char *path;
    char buf[1024];
    int s;

    rtp_c = find_rtp_session(session_id);
    if (!rtp_c)
        return NULL;

    /* extract the path component of the request URL */
    url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (path[0] == '/')
        path++;

    /* the aggregate URL always matches */
    if (strcmp(path, rtp_c->stream->filename) == 0)
        return rtp_c;

    /* otherwise accept any per-stream sub-URL of this session */
    for (s = 0; s < rtp_c->stream->nb_streams; s++) {
        snprintf(buf, sizeof(buf), "%s/streamid=%d",
                 rtp_c->stream->filename, s);
        if (strncmp(path, buf, sizeof(buf)) == 0) {
            // XXX: Should we reply with RTSP_STATUS_ONLY_AGGREGATE if nb_streams>1?
            return rtp_c;
        }
    }
    return NULL;
}
/* Handle an RTSP PLAY request: switch the matching RTP session into
   the data-sending state and acknowledge with the session ID. */
static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h)
{
    HTTPContext *rtp_c = find_rtp_session_with_url(url, h->session_id);

    if (!rtp_c) {
        rtsp_reply_error(c, RTSP_STATUS_SESSION);
        return;
    }
    /* PLAY is only legal from these states */
    switch (rtp_c->state) {
    case HTTPSTATE_SEND_DATA:
    case HTTPSTATE_WAIT_FEED:
    case HTTPSTATE_READY:
        break;
    default:
        rtsp_reply_error(c, RTSP_STATUS_STATE);
        return;
    }
    rtp_c->state = HTTPSTATE_SEND_DATA;
    /* now everything is OK, so we can send the connection parameters */
    rtsp_reply_header(c, RTSP_STATUS_OK);
    /* session ID */
    url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
    url_fprintf(c->pb, "\r\n");
}
/* Handle an RTSP PAUSE request: park the matching RTP session in the
   ready state and reset its pts origin so that a later PLAY restarts
   timing cleanly. */
static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h)
{
    HTTPContext *rtp_c = find_rtp_session_with_url(url, h->session_id);

    if (!rtp_c) {
        rtsp_reply_error(c, RTSP_STATUS_SESSION);
        return;
    }
    /* PAUSE is only legal while the session is (or is about to be) sending */
    switch (rtp_c->state) {
    case HTTPSTATE_SEND_DATA:
    case HTTPSTATE_WAIT_FEED:
        break;
    default:
        rtsp_reply_error(c, RTSP_STATUS_STATE);
        return;
    }
    rtp_c->state = HTTPSTATE_READY;
    rtp_c->first_pts = AV_NOPTS_VALUE;
    /* now everything is OK, so we can send the connection parameters */
    rtsp_reply_header(c, RTSP_STATUS_OK);
    /* session ID */
    url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
    url_fprintf(c->pb, "\r\n");
}
/* Handle an RTSP TEARDOWN request: abort the matching RTP session,
   notify the optional server callback, and acknowledge. */
static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h)
{
    HTTPContext *rtp_c = find_rtp_session_with_url(url, h->session_id);

    if (!rtp_c) {
        rtsp_reply_error(c, RTSP_STATUS_SESSION);
        return;
    }
    /* abort the session */
    close_connection(rtp_c);
    /* NOTE(review): rtp_c is still dereferenced below after
       close_connection(); verify that close_connection() does not
       free the context. */
    if (ff_rtsp_callback) {
        ff_rtsp_callback(RTSP_ACTION_SERVER_TEARDOWN, rtp_c->session_id,
                         NULL, 0,
                         rtp_c->stream->rtsp_option);
    }
    /* now everything is OK, so we can send the connection parameters */
    rtsp_reply_header(c, RTSP_STATUS_OK);
    /* session ID */
    url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
    url_fprintf(c->pb, "\r\n");
}
/********************************************************************/
/* RTP handling */
/* Create a new (initially idle) RTP connection attached to 'stream'
 * with the given session ID and transport protocol.  The connection
 * is prepended to the global first_http_ctx list and its bandwidth is
 * accounted in current_bandwidth.  Returns NULL when the connection
 * limit is reached or allocation fails. */
static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
                                       FFStream *stream, const char *session_id,
                                       enum RTSPProtocol rtp_protocol)
{
    HTTPContext *c = NULL;
    const char *proto_str;
    /* XXX: should output a warning page when coming
       close to the connection limit */
    if (nb_connections >= nb_max_connections)
        goto fail;
    /* add a new connection */
    c = av_mallocz(sizeof(HTTPContext));
    if (!c)
        goto fail;
    c->fd = -1; /* no socket of its own; data goes via rtp_handles/rtsp_c */
    c->poll_entry = NULL;
    c->from_addr = *from_addr;
    c->buffer_size = IOBUFFER_INIT_SIZE;
    c->buffer = av_malloc(c->buffer_size);
    if (!c->buffer)
        goto fail;
    nb_connections++;
    c->stream = stream;
    pstrcpy(c->session_id, sizeof(c->session_id), session_id);
    c->state = HTTPSTATE_READY;
    c->is_packetized = 1;
    c->rtp_protocol = rtp_protocol;
    /* protocol is shown in statistics */
    switch(c->rtp_protocol) {
    case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
        proto_str = "MCAST";
        break;
    case RTSP_PROTOCOL_RTP_UDP:
        proto_str = "UDP";
        break;
    case RTSP_PROTOCOL_RTP_TCP:
        proto_str = "TCP";
        break;
    default:
        proto_str = "???";
        break;
    }
    pstrcpy(c->protocol, sizeof(c->protocol), "RTP/");
    pstrcat(c->protocol, sizeof(c->protocol), proto_str);
    current_bandwidth += stream->bandwidth;
    c->next = first_http_ctx;
    first_http_ctx = c;
    return c;
 fail:
    if (c) {
        /* c was zeroed by av_mallocz, so c->buffer is NULL or valid */
        av_free(c->buffer);
        av_free(c);
    }
    return NULL;
}
/* Add a new RTP stream in an RTP connection (used in RTSP SETUP
   command). If RTP/TCP protocol is used, TCP connection 'rtsp_c' is
   used.  Returns 0 on success, -1 on failure.
   Fix: 'h' was read uninitialized on the error path when failure
   occurred before url_open() was reached (TCP transport, allocation
   failure, unknown protocol), making the cleanup code call
   url_close() on a garbage pointer.  It is now initialized to NULL. */
static int rtp_new_av_stream(HTTPContext *c,
                             int stream_index, struct sockaddr_in *dest_addr,
                             HTTPContext *rtsp_c)
{
    AVFormatContext *ctx;
    AVStream *st;
    char *ipaddr;
    URLContext *h = NULL; /* must stay NULL until url_open() succeeds */
    uint8_t *dummy_buf;
    char buf2[32];
    int max_packet_size;
    /* now we can open the relevant output stream */
    ctx = av_alloc_format_context();
    if (!ctx)
        return -1;
    ctx->oformat = &rtp_mux;
    st = av_mallocz(sizeof(AVStream));
    if (!st)
        goto fail;
    ctx->nb_streams = 1;
    ctx->streams[0] = st;
    /* clone the elementary stream description from the source */
    if (!c->stream->feed ||
        c->stream->feed == c->stream) {
        memcpy(st, c->stream->streams[stream_index], sizeof(AVStream));
    } else {
        memcpy(st,
               c->stream->feed->streams[c->stream->feed_streams[stream_index]],
               sizeof(AVStream));
    }
    /* build destination RTP address */
    ipaddr = inet_ntoa(dest_addr->sin_addr);
    switch(c->rtp_protocol) {
    case RTSP_PROTOCOL_RTP_UDP:
    case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
        /* RTP/UDP case */
        /* XXX: also pass as parameter to function ? */
        if (c->stream->is_multicast) {
            int ttl;
            ttl = c->stream->multicast_ttl;
            if (!ttl)
                ttl = 16;
            snprintf(ctx->filename, sizeof(ctx->filename),
                     "rtp://%s:%d?multicast=1&ttl=%d",
                     ipaddr, ntohs(dest_addr->sin_port), ttl);
        } else {
            snprintf(ctx->filename, sizeof(ctx->filename),
                     "rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
        }
        if (url_open(&h, ctx->filename, URL_WRONLY) < 0)
            goto fail;
        c->rtp_handles[stream_index] = h;
        max_packet_size = url_get_max_packet_size(h);
        break;
    case RTSP_PROTOCOL_RTP_TCP:
        /* RTP/TCP case */
        c->rtsp_c = rtsp_c;
        max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
        break;
    default:
        goto fail;
    }
    http_log("%s:%d - - [%s] \"PLAY %s/streamid=%d %s\"\n",
             ipaddr, ntohs(dest_addr->sin_port),
             ctime1(buf2),
             c->stream->filename, stream_index, c->protocol);
    /* normally, no packets should be output here, but the packet size may be checked */
    if (url_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) {
        /* XXX: close stream */
        goto fail;
    }
    av_set_parameters(ctx, NULL);
    if (av_write_header(ctx) < 0) {
    fail:
        if (h)
            url_close(h);
        /* NOTE(review): ctx->streams[0] is not freed here; av_free(ctx)
           releases only the context itself -- possible small leak */
        av_free(ctx);
        return -1;
    }
    url_close_dyn_buf(&ctx->pb, &dummy_buf);
    av_free(dummy_buf);
    c->rtp_ctx[stream_index] = ctx;
    return 0;
}
/********************************************************************/
/* ffserver initialization */
/* Allocate a new AVStream for 'stream', copying codec parameters from
 * 'codec', and append it to the stream's array.
 * Returns the new stream, or NULL on allocation failure. */
static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec)
{
    AVStream *fst;

    fst = av_mallocz(sizeof(AVStream));
    if (!fst)
        return NULL;
    fst->priv_data = av_mallocz(sizeof(FeedData));
    if (!fst->priv_data) {
        /* FIX: this allocation was previously unchecked; a NULL
           priv_data would crash later feed handling */
        av_free(fst);
        return NULL;
    }
    memcpy(&fst->codec, codec, sizeof(AVCodecContext));
    fst->codec.coded_frame = &dummy_frame;
    fst->index = stream->nb_streams;
    /* 90 kHz timebase, 33-bit pts: the standard MPEG clock */
    av_set_pts_info(fst, 33, 1, 90000);
    stream->streams[stream->nb_streams++] = fst;
    return fst;
}
/* Return the index of a stream in 'feed' whose codec parameters match
 * those of 'st'. When no existing stream matches, a new one is appended.
 * Returns the stream index in the feed, or -1 on allocation failure. */
static int add_av_stream(FFStream *feed, AVStream *st)
{
    AVCodecContext *wanted = &st->codec;
    int i;

    for (i = 0; i < feed->nb_streams; i++) {
        AVCodecContext *cur = &feed->streams[i]->codec;
        int same = 0;

        /* generic parameters must match first */
        if (cur->codec_id != wanted->codec_id ||
            cur->codec_type != wanted->codec_type ||
            cur->bit_rate != wanted->bit_rate)
            continue;

        /* then the type-specific ones */
        switch (wanted->codec_type) {
        case CODEC_TYPE_AUDIO:
            same = (cur->channels == wanted->channels &&
                    cur->sample_rate == wanted->sample_rate);
            break;
        case CODEC_TYPE_VIDEO:
            same = (cur->width == wanted->width &&
                    cur->height == wanted->height &&
                    cur->frame_rate == wanted->frame_rate &&
                    cur->frame_rate_base == wanted->frame_rate_base &&
                    cur->gop_size == wanted->gop_size);
            break;
        default:
            av_abort();
        }
        if (same)
            return i;
    }

    /* no compatible stream found: create one */
    if (!add_av_stream1(feed, wanted))
        return -1;
    return feed->nb_streams - 1;
}
/* Unlink every occurrence of 'stream' from the global first_stream list.
   The stream itself is not freed. */
static void remove_stream(FFStream *stream)
{
    FFStream **link = &first_stream;

    while (*link != NULL) {
        if (*link == stream)
            *link = (*link)->next;   /* splice it out, stay on this slot */
        else
            link = &(*link)->next;   /* advance to the next link */
    }
}
/* Specific MPEG4 handling: extract the raw stream header.
 *
 * For every MPEG4 stream that has no extradata yet, packets are read
 * until the VOP start code (00 00 01 b6) appears; all bytes before it
 * are assumed to be the codec header and are copied into extradata. */
static void extract_mpeg4_header(AVFormatContext *infile)
{
    int mpeg4_count, i, size;
    AVPacket pkt;
    AVStream *st;
    const uint8_t *p;

    /* count streams that still need a header */
    mpeg4_count = 0;
    for(i=0;i<infile->nb_streams;i++) {
        st = infile->streams[i];
        if (st->codec.codec_id == CODEC_ID_MPEG4 &&
            st->codec.extradata_size == 0) {
            mpeg4_count++;
        }
    }
    if (!mpeg4_count)
        return;
    printf("MPEG4 without extra data: trying to find header in %s\n", infile->filename);
    while (mpeg4_count > 0) {
        if (av_read_packet(infile, &pkt) < 0)
            break;
        st = infile->streams[pkt.stream_index];
        if (st->codec.codec_id == CODEC_ID_MPEG4 &&
            st->codec.extradata_size == 0) {
            av_freep(&st->codec.extradata);
            /* fill extradata with the header */
            /* XXX: we make hard suppositions here ! */
            p = pkt.data;
            while (p < pkt.data + pkt.size - 4) {
                /* stop when vop header is found */
                if (p[0] == 0x00 && p[1] == 0x00 &&
                    p[2] == 0x01 && p[3] == 0xb6) {
                    size = p - pkt.data;
                    st->codec.extradata = av_malloc(size);
                    if (st->codec.extradata) {
                        /* FIX: allocation was previously unchecked before
                           the memcpy, which would crash on OOM */
                        st->codec.extradata_size = size;
                        memcpy(st->codec.extradata, pkt.data, size);
                    }
                    break;
                }
                p++;
            }
            mpeg4_count--;
        }
        av_free_packet(&pkt);
    }
}
/* compute the needed AVStream for each file */
/* For every live stream served from a file (no feed), open the file,
 * probe its streams and register each one on the FFStream.  Streams
 * whose file cannot be opened or probed are removed from the global
 * list entirely. */
static void build_file_streams(void)
{
    FFStream *stream, *stream_next;
    AVFormatContext *infile;
    int i;
    /* gather all streams */
    /* stream_next is cached because remove_stream() may unlink 'stream' */
    for(stream = first_stream; stream != NULL; stream = stream_next) {
        stream_next = stream->next;
        if (stream->stream_type == STREAM_TYPE_LIVE &&
            !stream->feed) {
            /* the stream comes from a file */
            /* try to open the file */
            /* open stream */
            /* NOTE(review): av_mallocz() result is not checked here */
            stream->ap_in = av_mallocz(sizeof(AVFormatParameters));
            if (stream->fmt == &rtp_mux) {
                /* specific case : if transport stream output to RTP,
                   we use a raw transport stream reader */
                stream->ap_in->mpeg2ts_raw = 1;
                stream->ap_in->mpeg2ts_compute_pcr = 1;
            }
            if (av_open_input_file(&infile, stream->feed_filename,
                                   stream->ifmt, 0, stream->ap_in) < 0) {
                /* NOTE(review): log message lacks a trailing newline */
                http_log("%s not found", stream->feed_filename);
                /* remove stream (no need to spend more time on it) */
                /* NOTE(review): the 'fail' label below is also jumped to
                   backwards from the else branch when probing fails */
            fail:
                remove_stream(stream);
            } else {
                /* find all the AVStreams inside and reference them in
                   'stream' */
                if (av_find_stream_info(infile) < 0) {
                    http_log("Could not find codec parameters from '%s'",
                             stream->feed_filename);
                    av_close_input_file(infile);
                    goto fail;
                }
                extract_mpeg4_header(infile);
                for(i=0;i<infile->nb_streams;i++) {
                    add_av_stream1(stream, &infile->streams[i]->codec);
                }
                av_close_input_file(infile);
            }
        }
    }
}
/* compute the needed AVStream for each feed */
/* Three passes:
 *   1. map each fed stream's sub-streams onto its feed's streams,
 *   2. identity-map the feeds' own stream indices,
 *   3. validate or (re)create each feed file on disk and read back its
 *      write index and current size.
 * Exits the process on unrecoverable file errors. */
static void build_feed_streams(void)
{
    FFStream *stream, *feed;
    int i;
    /* gather all streams */
    /* pass 1: streams that consume a feed */
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        feed = stream->feed;
        if (feed) {
            if (!stream->is_feed) {
                /* we handle a stream coming from a feed */
                for(i=0;i<stream->nb_streams;i++) {
                    stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]);
                }
            }
        }
    }
    /* gather all streams */
    /* pass 2: the feeds themselves map 1:1 onto their own streams */
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        feed = stream->feed;
        if (feed) {
            if (stream->is_feed) {
                for(i=0;i<stream->nb_streams;i++) {
                    stream->feed_streams[i] = i;
                }
            }
        }
    }
    /* create feed files if needed */
    /* pass 3: check existing feed files against the expected layout;
       delete mismatching ones (unless readonly) and create fresh files */
    for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
        int fd;
        if (url_exist(feed->feed_filename)) {
            /* See if it matches */
            AVFormatContext *s;
            int matches = 0;
            if (av_open_input_file(&s, feed->feed_filename, NULL, FFM_PACKET_SIZE, NULL) >= 0) {
                /* Now see if it matches */
                if (s->nb_streams == feed->nb_streams) {
                    matches = 1;
                    for(i=0;i<s->nb_streams;i++) {
                        AVStream *sf, *ss;
                        sf = feed->streams[i];
                        ss = s->streams[i];
                        if (sf->index != ss->index ||
                            sf->id != ss->id) {
                            printf("Index & Id do not match for stream %d (%s)\n",
                                   i, feed->feed_filename);
                            matches = 0;
                        } else {
                            AVCodecContext *ccf, *ccs;
                            ccf = &sf->codec;
                            ccs = &ss->codec;
/* compare one codec field between the feed's and the file's stream */
#define CHECK_CODEC(x) (ccf->x != ccs->x)
                            if (CHECK_CODEC(codec) || CHECK_CODEC(codec_type)) {
                                printf("Codecs do not match for stream %d\n", i);
                                matches = 0;
                            } else if (CHECK_CODEC(bit_rate) || CHECK_CODEC(flags)) {
                                printf("Codec bitrates do not match for stream %d\n", i);
                                matches = 0;
                            } else if (ccf->codec_type == CODEC_TYPE_VIDEO) {
                                if (CHECK_CODEC(frame_rate) ||
                                    CHECK_CODEC(frame_rate_base) ||
                                    CHECK_CODEC(width) ||
                                    CHECK_CODEC(height)) {
                                    printf("Codec width, height and framerate do not match for stream %d\n", i);
                                    matches = 0;
                                }
                            } else if (ccf->codec_type == CODEC_TYPE_AUDIO) {
                                if (CHECK_CODEC(sample_rate) ||
                                    CHECK_CODEC(channels) ||
                                    CHECK_CODEC(frame_size)) {
                                    printf("Codec sample_rate, channels, frame_size do not match for stream %d\n", i);
                                    matches = 0;
                                }
                            } else {
                                printf("Unknown codec type\n");
                                matches = 0;
                            }
                        }
                        if (!matches) {
                            break;
                        }
                    }
                } else {
                    printf("Deleting feed file '%s' as stream counts differ (%d != %d)\n",
                        feed->feed_filename, s->nb_streams, feed->nb_streams);
                }
                av_close_input_file(s);
            } else {
                printf("Deleting feed file '%s' as it appears to be corrupt\n",
                       feed->feed_filename);
            }
            if (!matches) {
                if (feed->readonly) {
                    printf("Unable to delete feed file '%s' as it is marked readonly\n",
                        feed->feed_filename);
                    exit(1);
                }
                unlink(feed->feed_filename);
            }
        }
        if (!url_exist(feed->feed_filename)) {
            /* stack-allocated context: only used to write the header */
            AVFormatContext s1, *s = &s1;
            if (feed->readonly) {
                printf("Unable to create feed file '%s' as it is marked readonly\n",
                    feed->feed_filename);
                exit(1);
            }
            /* only write the header of the ffm file */
            if (url_fopen(&s->pb, feed->feed_filename, URL_WRONLY) < 0) {
                fprintf(stderr, "Could not open output feed file '%s'\n",
                        feed->feed_filename);
                exit(1);
            }
            s->oformat = feed->fmt;
            s->nb_streams = feed->nb_streams;
            for(i=0;i<s->nb_streams;i++) {
                AVStream *st;
                st = feed->streams[i];
                s->streams[i] = st;
            }
            av_set_parameters(s, NULL);
            av_write_header(s);
            /* XXX: need better api */
            av_freep(&s->priv_data);
            url_fclose(&s->pb);
        }
        /* get feed size and write index */
        fd = open(feed->feed_filename, O_RDONLY);
        if (fd < 0) {
            fprintf(stderr, "Could not open output feed file '%s'\n",
                    feed->feed_filename);
            exit(1);
        }
        feed->feed_write_index = ffm_read_write_index(fd);
        feed->feed_size = lseek(fd, 0, SEEK_END);
        /* ensure that we do not wrap before the end of file */
        if (feed->feed_max_size < feed->feed_size)
            feed->feed_max_size = feed->feed_size;
        close(fd);
    }
}
/* Compute the bandwidth used by each stream, in kbits/s, rounded up,
   by summing the audio and video codec bit rates. */
static void compute_bandwidth(void)
{
    FFStream *stream;

    for (stream = first_stream; stream != NULL; stream = stream->next) {
        int total = 0;
        int i;

        for (i = 0; i < stream->nb_streams; i++) {
            AVStream *st = stream->streams[i];

            /* only audio and video contribute to the total */
            if (st->codec.codec_type == CODEC_TYPE_AUDIO ||
                st->codec.codec_type == CODEC_TYPE_VIDEO)
                total += st->codec.bit_rate;
        }
        /* bits/s -> kbits/s, rounding up */
        stream->bandwidth = (total + 999) / 1000;
    }
}
/* Extract one whitespace-separated (optionally single- or double-quoted)
 * token from *pp into buf.  buf is always NUL-terminated; overlong tokens
 * are silently truncated to buf_size - 1 characters.  *pp is advanced past
 * the token and its closing quote, if any. */
static void get_arg(char *buf, int buf_size, const char **pp)
{
    const char *p;
    char *q;
    int quote;
    p = *pp;
    /* FIX: cast to unsigned char — passing a negative plain char to
       isspace() is undefined behavior (CERT STR37-C) */
    while (isspace((unsigned char)*p)) p++;
    q = buf;
    quote = 0;
    if (*p == '\"' || *p == '\'')
        quote = *p++;
    for(;;) {
        if (quote) {
            /* quoted token ends at the matching quote */
            if (*p == quote)
                break;
        } else {
            /* unquoted token ends at whitespace */
            if (isspace((unsigned char)*p))
                break;
        }
        if (*p == '\0')
            break;
        if ((q - buf) < buf_size - 1)
            *q++ = *p;
        p++;
    }
    *q = '\0';
    if (quote && *p == quote)
        p++;
    *pp = p;
}
/* Append a codec to 'stream', filling in sensible streaming defaults for
   every parameter the configuration left at zero.  Silently does nothing
   when the AVStream allocation fails. */
static void add_codec(FFStream *stream, AVCodecContext *av)
{
    AVStream *new_st;

    /* fill in defaults depending on the media type */
    switch (av->codec_type) {
    case CODEC_TYPE_AUDIO:
        if (!av->bit_rate)
            av->bit_rate = 64000;
        if (!av->sample_rate)
            av->sample_rate = 22050;
        if (!av->channels)
            av->channels = 1;
        break;
    case CODEC_TYPE_VIDEO:
        if (!av->bit_rate)
            av->bit_rate = 64000;
        if (!av->frame_rate) {
            av->frame_rate = 5;
            av->frame_rate_base = 1;
        }
        if (!av->width || !av->height) {
            av->width = 160;
            av->height = 128;
        }
        /* Bitrate tolerance is less for streaming */
        if (!av->bit_rate_tolerance)
            av->bit_rate_tolerance = av->bit_rate / 4;
        if (!av->qmin)
            av->qmin = 3;
        if (!av->qmax)
            av->qmax = 31;
        if (!av->max_qdiff)
            av->max_qdiff = 3;
        av->qcompress = 0.5;
        av->qblur = 0.5;
        if (!av->rc_eq)
            av->rc_eq = "tex^qComp";
        if (!av->i_quant_factor)
            av->i_quant_factor = -0.8;
        if (!av->b_quant_factor)
            av->b_quant_factor = 1.25;
        if (!av->b_quant_offset)
            av->b_quant_offset = 1.25;
        if (!av->rc_max_rate)
            av->rc_max_rate = av->bit_rate * 2;
        break;
    default:
        av_abort();
    }

    new_st = av_mallocz(sizeof(AVStream));
    if (!new_st)
        return;
    stream->streams[stream->nb_streams++] = new_st;
    memcpy(&new_st->codec, av, sizeof(AVCodecContext));
}
/* Look up an audio codec by name in the registered codec list.
   Returns its id, or CODEC_ID_NONE when no audio codec matches. */
static int opt_audio_codec(const char *arg)
{
    AVCodec *codec;

    for (codec = first_avcodec; codec; codec = codec->next) {
        if (codec->type == CODEC_TYPE_AUDIO && !strcmp(codec->name, arg))
            return codec->id;
    }
    return CODEC_ID_NONE;
}
/* Look up a video codec by name in the registered codec list.
   Returns its id, or CODEC_ID_NONE when no video codec matches. */
static int opt_video_codec(const char *arg)
{
    AVCodec *codec;

    for (codec = first_avcodec; codec; codec = codec->next) {
        if (codec->type == CODEC_TYPE_VIDEO && !strcmp(codec->name, arg))
            return codec->id;
    }
    return CODEC_ID_NONE;
}
/* simplistic plugin support */
#ifdef CONFIG_HAVE_DLOPEN
/* Load a plugin shared object and run its ffserver_module_init() hook.
 * Errors are reported to stderr; the function never aborts the server. */
void load_module(const char *filename)
{
    void *dll;
    void (*init_func)(void);
    dll = dlopen(filename, RTLD_NOW);
    if (!dll) {
        fprintf(stderr, "Could not load module '%s' - %s\n",
                filename, dlerror());
        return;
    }

    init_func = dlsym(dll, "ffserver_module_init");
    if (!init_func) {
        fprintf(stderr,
                "%s: init function 'ffserver_module_init()' not found\n",
                filename);
        dlclose(dll);
        /* FIX: previously fell through and called the NULL init_func */
        return;
    }

    init_func();
}
#endif
/* Parse the ffserver configuration file.
 *
 * Builds the global stream and feed lists (first_stream, first_feed) and
 * fills in the global server parameters (ports, connection/bandwidth
 * limits, log file name).  Returns 0 on success, -1 when the file cannot
 * be opened or contains errors. */
static int parse_ffconfig(const char *filename)
{
    FILE *f;
    char line[1024];
    char cmd[64];
    char arg[1024];
    const char *p;
    int val, errors, line_num;
    FFStream **last_stream, *stream, *redirect;
    FFStream **last_feed, *feed;
    AVCodecContext audio_enc, video_enc;
    int audio_id, video_id;
    f = fopen(filename, "r");
    if (!f) {
        perror(filename);
        return -1;
    }
    errors = 0;
    line_num = 0;
    first_stream = NULL;
    last_stream = &first_stream;
    first_feed = NULL;
    last_feed = &first_feed;
    stream = NULL;      /* non-NULL while inside a <Stream> section */
    feed = NULL;        /* non-NULL while inside a <Feed> section */
    redirect = NULL;    /* non-NULL while inside a <Redirect> section */
    audio_id = CODEC_ID_NONE;
    video_id = CODEC_ID_NONE;
    for(;;) {
        if (fgets(line, sizeof(line), f) == NULL)
            break;
        line_num++;
        p = line;
        /* FIX: cast to unsigned char — isspace() on a negative plain
           char is undefined behavior (CERT STR37-C) */
        while (isspace((unsigned char)*p))
            p++;
        if (*p == '\0' || *p == '#')
            continue;
        get_arg(cmd, sizeof(cmd), &p);
        if (!strcasecmp(cmd, "Port")) {
            get_arg(arg, sizeof(arg), &p);
            my_http_addr.sin_port = htons (atoi(arg));
        } else if (!strcasecmp(cmd, "BindAddress")) {
            get_arg(arg, sizeof(arg), &p);
            if (!inet_aton(arg, &my_http_addr.sin_addr)) {
                fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
                        filename, line_num, arg);
                errors++;
            }
        } else if (!strcasecmp(cmd, "NoDaemon")) {
            ffserver_daemon = 0;
        } else if (!strcasecmp(cmd, "RTSPPort")) {
            get_arg(arg, sizeof(arg), &p);
            my_rtsp_addr.sin_port = htons (atoi(arg));
        } else if (!strcasecmp(cmd, "RTSPBindAddress")) {
            get_arg(arg, sizeof(arg), &p);
            if (!inet_aton(arg, &my_rtsp_addr.sin_addr)) {
                fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
                        filename, line_num, arg);
                errors++;
            }
        } else if (!strcasecmp(cmd, "MaxClients")) {
            get_arg(arg, sizeof(arg), &p);
            val = atoi(arg);
            if (val < 1 || val > HTTP_MAX_CONNECTIONS) {
                fprintf(stderr, "%s:%d: Invalid MaxClients: %s\n",
                        filename, line_num, arg);
                errors++;
            } else {
                nb_max_connections = val;
            }
        } else if (!strcasecmp(cmd, "MaxBandwidth")) {
            get_arg(arg, sizeof(arg), &p);
            val = atoi(arg);
            if (val < 10 || val > 100000) {
                fprintf(stderr, "%s:%d: Invalid MaxBandwidth: %s\n",
                        filename, line_num, arg);
                errors++;
            } else {
                max_bandwidth = val;
            }
        } else if (!strcasecmp(cmd, "CustomLog")) {
            get_arg(logfilename, sizeof(logfilename), &p);
        } else if (!strcasecmp(cmd, "<Feed")) {
            /*********************************************/
            /* Feed related options */
            char *q;
            if (stream || feed) {
                fprintf(stderr, "%s:%d: Already in a tag\n",
                        filename, line_num);
            } else {
                feed = av_mallocz(sizeof(FFStream));
                /* add in stream list */
                *last_stream = feed;
                last_stream = &feed->next;
                /* add in feed list */
                *last_feed = feed;
                last_feed = &feed->next_feed;
                get_arg(feed->filename, sizeof(feed->filename), &p);
                /* strip the trailing '>' of '<Feed name>' */
                q = strrchr(feed->filename, '>');
                if (q)   /* FIX: was 'if (*q)' — NULL deref when '>' absent */
                    *q = '\0';
                feed->fmt = guess_format("ffm", NULL, NULL);
                /* defaut feed file */
                snprintf(feed->feed_filename, sizeof(feed->feed_filename),
                         "/tmp/%s.ffm", feed->filename);
                feed->feed_max_size = 5 * 1024 * 1024;
                feed->is_feed = 1;
                feed->feed = feed; /* self feeding :-) */
            }
        } else if (!strcasecmp(cmd, "Launch")) {
            if (feed) {
                int i;
                /* build the argv of the child ffmpeg process */
                feed->child_argv = (char **) av_mallocz(64 * sizeof(char *));
                feed->child_argv[0] = av_malloc(7);
                strcpy(feed->child_argv[0], "ffmpeg");
                for (i = 1; i < 62; i++) {
                    char argbuf[256];
                    get_arg(argbuf, sizeof(argbuf), &p);
                    if (!argbuf[0])
                        break;
                    feed->child_argv[i] = av_malloc(strlen(argbuf) + 1);
                    strcpy(feed->child_argv[i], argbuf);
                }
                feed->child_argv[i] = av_malloc(30 + strlen(feed->filename));
                /* FIX: the size must match the allocation above
                   (was hard-coded to 256, larger than the buffer) */
                snprintf(feed->child_argv[i], 30 + strlen(feed->filename),
                    "http://127.0.0.1:%d/%s",
                    ntohs(my_http_addr.sin_port), feed->filename);
            }
        } else if (!strcasecmp(cmd, "ReadOnlyFile")) {
            if (feed) {
                get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
                feed->readonly = 1;
            } else if (stream) {
                get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
            }
        } else if (!strcasecmp(cmd, "File")) {
            if (feed) {
                get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
            } else if (stream) {
                get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
            }
        } else if (!strcasecmp(cmd, "FileMaxSize")) {
            if (feed) {
                const char *p1;
                double fsize;
                get_arg(arg, sizeof(arg), &p);
                p1 = arg;
                fsize = strtod(p1, (char **)&p1);
                /* optional K/M/G size suffix */
                switch(toupper((unsigned char)*p1)) {
                case 'K':
                    fsize *= 1024;
                    break;
                case 'M':
                    fsize *= 1024 * 1024;
                    break;
                case 'G':
                    fsize *= 1024 * 1024 * 1024;
                    break;
                }
                feed->feed_max_size = (int64_t)fsize;
            }
        } else if (!strcasecmp(cmd, "</Feed>")) {
            if (!feed) {
                fprintf(stderr, "%s:%d: No corresponding <Feed> for </Feed>\n",
                        filename, line_num);
                errors++;
#if 0
            } else {
                /* Make sure that we start out clean */
                if (unlink(feed->feed_filename) < 0
                    && errno != ENOENT) {
                    fprintf(stderr, "%s:%d: Unable to clean old feed file '%s': %s\n",
                            filename, line_num, feed->feed_filename, strerror(errno));
                    errors++;
                }
#endif
            }
            feed = NULL;
        } else if (!strcasecmp(cmd, "<Stream")) {
            /*********************************************/
            /* Stream related options */
            char *q;
            if (stream || feed) {
                fprintf(stderr, "%s:%d: Already in a tag\n",
                        filename, line_num);
            } else {
                stream = av_mallocz(sizeof(FFStream));
                *last_stream = stream;
                last_stream = &stream->next;
                get_arg(stream->filename, sizeof(stream->filename), &p);
                /* strip the trailing '>' of '<Stream name>' */
                q = strrchr(stream->filename, '>');
                if (q)   /* FIX: was 'if (*q)' — NULL deref when '>' absent */
                    *q = '\0';
                stream->fmt = guess_stream_format(NULL, stream->filename, NULL);
                memset(&audio_enc, 0, sizeof(AVCodecContext));
                memset(&video_enc, 0, sizeof(AVCodecContext));
                audio_id = CODEC_ID_NONE;
                video_id = CODEC_ID_NONE;
                if (stream->fmt) {
                    audio_id = stream->fmt->audio_codec;
                    video_id = stream->fmt->video_codec;
                }
            }
        } else if (!strcasecmp(cmd, "Feed")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                FFStream *sfeed;
                sfeed = first_feed;
                while (sfeed != NULL) {
                    if (!strcmp(sfeed->filename, arg))
                        break;
                    sfeed = sfeed->next_feed;
                }
                if (!sfeed) {
                    fprintf(stderr, "%s:%d: feed '%s' not defined\n",
                            filename, line_num, arg);
                } else {
                    stream->feed = sfeed;
                }
            }
        } else if (!strcasecmp(cmd, "Format")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {   /* FIX: guard against 'Format' outside <Stream> */
                if (!strcmp(arg, "status")) {
                    stream->stream_type = STREAM_TYPE_STATUS;
                    stream->fmt = NULL;
                } else {
                    stream->stream_type = STREAM_TYPE_LIVE;
                    /* jpeg cannot be used here, so use single frame jpeg */
                    if (!strcmp(arg, "jpeg"))
                        strcpy(arg, "singlejpeg");
                    stream->fmt = guess_stream_format(arg, NULL, NULL);
                    if (!stream->fmt) {
                        fprintf(stderr, "%s:%d: Unknown Format: %s\n",
                                filename, line_num, arg);
                        errors++;
                    }
                }
                if (stream->fmt) {
                    audio_id = stream->fmt->audio_codec;
                    video_id = stream->fmt->video_codec;
                }
            }
        } else if (!strcasecmp(cmd, "InputFormat")) {
            /* FIX: the argument was never read before use */
            get_arg(arg, sizeof(arg), &p);
            if (stream) {   /* FIX: guard against use outside <Stream> */
                stream->ifmt = av_find_input_format(arg);
                if (!stream->ifmt) {
                    fprintf(stderr, "%s:%d: Unknown input format: %s\n",
                            filename, line_num, arg);
                }
            }
        } else if (!strcasecmp(cmd, "FaviconURL")) {
            if (stream && stream->stream_type == STREAM_TYPE_STATUS) {
                get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
            } else {
                fprintf(stderr, "%s:%d: FaviconURL only permitted for status streams\n",
                            filename, line_num);
                errors++;
            }
        } else if (!strcasecmp(cmd, "Author")) {
            if (stream) {
                get_arg(stream->author, sizeof(stream->author), &p);
            }
        } else if (!strcasecmp(cmd, "Comment")) {
            if (stream) {
                get_arg(stream->comment, sizeof(stream->comment), &p);
            }
        } else if (!strcasecmp(cmd, "Copyright")) {
            if (stream) {
                get_arg(stream->copyright, sizeof(stream->copyright), &p);
            }
        } else if (!strcasecmp(cmd, "Title")) {
            if (stream) {
                get_arg(stream->title, sizeof(stream->title), &p);
            }
        } else if (!strcasecmp(cmd, "Preroll")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                stream->prebuffer = atof(arg) * 1000;
            }
        } else if (!strcasecmp(cmd, "StartSendOnKey")) {
            if (stream) {
                stream->send_on_key = 1;
            }
        } else if (!strcasecmp(cmd, "AudioCodec")) {
            get_arg(arg, sizeof(arg), &p);
            audio_id = opt_audio_codec(arg);
            if (audio_id == CODEC_ID_NONE) {
                fprintf(stderr, "%s:%d: Unknown AudioCodec: %s\n",
                        filename, line_num, arg);
                errors++;
            }
        } else if (!strcasecmp(cmd, "VideoCodec")) {
            get_arg(arg, sizeof(arg), &p);
            video_id = opt_video_codec(arg);
            if (video_id == CODEC_ID_NONE) {
                fprintf(stderr, "%s:%d: Unknown VideoCodec: %s\n",
                        filename, line_num, arg);
                errors++;
            }
        } else if (!strcasecmp(cmd, "MaxTime")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                stream->max_time = atof(arg) * 1000;
            }
        } else if (!strcasecmp(cmd, "AudioBitRate")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                audio_enc.bit_rate = atoi(arg) * 1000;
            }
        } else if (!strcasecmp(cmd, "AudioChannels")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                audio_enc.channels = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "AudioSampleRate")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                audio_enc.sample_rate = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "AudioQuality")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
//                audio_enc.quality = atof(arg) * 1000;
            }
        } else if (!strcasecmp(cmd, "VideoBitRateRange")) {
            if (stream) {
                int minrate, maxrate;
                get_arg(arg, sizeof(arg), &p);
                if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) {
                    video_enc.rc_min_rate = minrate * 1000;
                    video_enc.rc_max_rate = maxrate * 1000;
                } else {
                    fprintf(stderr, "%s:%d: Incorrect format for VideoBitRateRange -- should be <min>-<max>: %s\n",
                            filename, line_num, arg);
                    errors++;
                }
            }
        } else if (!strcasecmp(cmd, "VideoBufferSize")) {
            if (stream) {
                get_arg(arg, sizeof(arg), &p);
                video_enc.rc_buffer_size = atoi(arg) * 1024;
            }
        } else if (!strcasecmp(cmd, "VideoBitRateTolerance")) {
            if (stream) {
                get_arg(arg, sizeof(arg), &p);
                video_enc.bit_rate_tolerance = atoi(arg) * 1000;
            }
        } else if (!strcasecmp(cmd, "VideoBitRate")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.bit_rate = atoi(arg) * 1000;
            }
        } else if (!strcasecmp(cmd, "VideoSize")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                parse_image_size(&video_enc.width, &video_enc.height, arg);
                if ((video_enc.width % 16) != 0 ||
                    (video_enc.height % 16) != 0) {
                    fprintf(stderr, "%s:%d: Image size must be a multiple of 16\n",
                            filename, line_num);
                    errors++;
                }
            }
        } else if (!strcasecmp(cmd, "VideoFrameRate")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.frame_rate_base= DEFAULT_FRAME_RATE_BASE;
                video_enc.frame_rate = (int)(strtod(arg, NULL) * video_enc.frame_rate_base);
            }
        } else if (!strcasecmp(cmd, "VideoGopSize")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.gop_size = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "VideoIntraOnly")) {
            if (stream) {
                video_enc.gop_size = 1;
            }
        } else if (!strcasecmp(cmd, "VideoHighQuality")) {
            if (stream) {
                video_enc.mb_decision = FF_MB_DECISION_BITS;
            }
        } else if (!strcasecmp(cmd, "Video4MotionVector")) {
            if (stream) {
                video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove
                video_enc.flags |= CODEC_FLAG_4MV;
            }
        } else if (!strcasecmp(cmd, "VideoQDiff")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.max_qdiff = atoi(arg);
                if (video_enc.max_qdiff < 1 || video_enc.max_qdiff > 31) {
                    fprintf(stderr, "%s:%d: VideoQDiff out of range\n",
                            filename, line_num);
                    errors++;
                }
            }
        } else if (!strcasecmp(cmd, "VideoQMax")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.qmax = atoi(arg);
                if (video_enc.qmax < 1 || video_enc.qmax > 31) {
                    fprintf(stderr, "%s:%d: VideoQMax out of range\n",
                            filename, line_num);
                    errors++;
                }
            }
        } else if (!strcasecmp(cmd, "VideoQMin")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.qmin = atoi(arg);
                if (video_enc.qmin < 1 || video_enc.qmin > 31) {
                    fprintf(stderr, "%s:%d: VideoQMin out of range\n",
                            filename, line_num);
                    errors++;
                }
            }
        } else if (!strcasecmp(cmd, "LumaElim")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.luma_elim_threshold = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "ChromaElim")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.chroma_elim_threshold = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "LumiMask")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.lumi_masking = atof(arg);
            }
        } else if (!strcasecmp(cmd, "DarkMask")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                video_enc.dark_masking = atof(arg);
            }
        } else if (!strcasecmp(cmd, "NoVideo")) {
            video_id = CODEC_ID_NONE;
        } else if (!strcasecmp(cmd, "NoAudio")) {
            audio_id = CODEC_ID_NONE;
        } else if (!strcasecmp(cmd, "ACL")) {
            IPAddressACL acl;
            struct hostent *he;
            get_arg(arg, sizeof(arg), &p);
            if (strcasecmp(arg, "allow") == 0) {
                acl.action = IP_ALLOW;
            } else if (strcasecmp(arg, "deny") == 0) {
                acl.action = IP_DENY;
            } else {
                fprintf(stderr, "%s:%d: ACL action '%s' is not ALLOW or DENY\n",
                        filename, line_num, arg);
                errors++;
            }
            get_arg(arg, sizeof(arg), &p);
            he = gethostbyname(arg);
            if (!he) {
                fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
                        filename, line_num, arg);
                errors++;
            } else {
                /* Only take the first */
                acl.first.s_addr = ntohl(((struct in_addr *) he->h_addr_list[0])->s_addr);
                acl.last = acl.first;
            }
            /* the range end is optional */
            get_arg(arg, sizeof(arg), &p);
            if (arg[0]) {
                he = gethostbyname(arg);
                if (!he) {
                    fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
                            filename, line_num, arg);
                    errors++;
                } else {
                    /* Only take the first */
                    acl.last.s_addr = ntohl(((struct in_addr *) he->h_addr_list[0])->s_addr);
                }
            }
            if (!errors) {
                IPAddressACL *nacl = (IPAddressACL *) av_mallocz(sizeof(*nacl));
                IPAddressACL **naclp = 0;
                *nacl = acl;
                nacl->next = 0;
                if (stream) {
                    naclp = &stream->acl;
                } else if (feed) {
                    naclp = &feed->acl;
                } else {
                    fprintf(stderr, "%s:%d: ACL found not in <stream> or <feed>\n",
                            filename, line_num);
                    errors++;
                }
                if (naclp) {
                    /* append to the end of the ACL chain */
                    while (*naclp)
                        naclp = &(*naclp)->next;
                    *naclp = nacl;
                }
            }
        } else if (!strcasecmp(cmd, "RTSPOption")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                av_freep(&stream->rtsp_option);
                /* XXX: av_strdup ? */
                stream->rtsp_option = av_malloc(strlen(arg) + 1);
                if (stream->rtsp_option) {
                    strcpy(stream->rtsp_option, arg);
                }
            }
        } else if (!strcasecmp(cmd, "MulticastAddress")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                if (!inet_aton(arg, &stream->multicast_ip)) {
                    fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
                            filename, line_num, arg);
                    errors++;
                }
                stream->is_multicast = 1;
                stream->loop = 1; /* default is looping */
            }
        } else if (!strcasecmp(cmd, "MulticastPort")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                stream->multicast_port = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "MulticastTTL")) {
            get_arg(arg, sizeof(arg), &p);
            if (stream) {
                stream->multicast_ttl = atoi(arg);
            }
        } else if (!strcasecmp(cmd, "NoLoop")) {
            if (stream) {
                stream->loop = 0;
            }
        } else if (!strcasecmp(cmd, "</Stream>")) {
            if (!stream) {
                fprintf(stderr, "%s:%d: No corresponding <Stream> for </Stream>\n",
                        filename, line_num);
                errors++;
            } else {
                /* FIX: this block previously dereferenced 'stream' even
                   when it was NULL (the error case above) */
                if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
                    if (audio_id != CODEC_ID_NONE) {
                        audio_enc.codec_type = CODEC_TYPE_AUDIO;
                        audio_enc.codec_id = audio_id;
                        add_codec(stream, &audio_enc);
                    }
                    if (video_id != CODEC_ID_NONE) {
                        video_enc.codec_type = CODEC_TYPE_VIDEO;
                        video_enc.codec_id = video_id;
                        if (!video_enc.rc_buffer_size) {
                            video_enc.rc_buffer_size = 40 * 1024;
                        }
                        add_codec(stream, &video_enc);
                    }
                }
                stream = NULL;
            }
        } else if (!strcasecmp(cmd, "<Redirect")) {
            /*********************************************/
            char *q;
            if (stream || feed || redirect) {
                fprintf(stderr, "%s:%d: Already in a tag\n",
                        filename, line_num);
                errors++;
            } else {
                redirect = av_mallocz(sizeof(FFStream));
                *last_stream = redirect;
                last_stream = &redirect->next;
                get_arg(redirect->filename, sizeof(redirect->filename), &p);
                /* strip the trailing '>' of '<Redirect name>' */
                q = strrchr(redirect->filename, '>');
                if (q)   /* FIX: was 'if (*q)' — NULL deref when '>' absent */
                    *q = '\0';
                redirect->stream_type = STREAM_TYPE_REDIRECT;
            }
        } else if (!strcasecmp(cmd, "URL")) {
            if (redirect) {
                get_arg(redirect->feed_filename, sizeof(redirect->feed_filename), &p);
            }
        } else if (!strcasecmp(cmd, "</Redirect>")) {
            if (!redirect) {
                fprintf(stderr, "%s:%d: No corresponding <Redirect> for </Redirect>\n",
                        filename, line_num);
                errors++;
            } else {
                /* FIX: previously dereferenced 'redirect' even when NULL */
                if (!redirect->feed_filename[0]) {
                    fprintf(stderr, "%s:%d: No URL found for <Redirect>\n",
                            filename, line_num);
                    errors++;
                }
                redirect = NULL;
            }
        } else if (!strcasecmp(cmd, "LoadModule")) {
            get_arg(arg, sizeof(arg), &p);
#ifdef CONFIG_HAVE_DLOPEN
            load_module(arg);
#else
            fprintf(stderr, "%s:%d: Module support not compiled into this version: '%s'\n",
                    filename, line_num, arg);
            errors++;
#endif
        } else {
            fprintf(stderr, "%s:%d: Incorrect keyword: '%s'\n",
                    filename, line_num, cmd);
            errors++;
        }
    }
    fclose(f);
    if (errors)
        return -1;
    else
        return 0;
}
#if 0
/* Dead code (inside #if 0): kept for reference only.
 * Writes one encoded packet (header + payload) into the shared HTTP fifo
 * and updates the encoder's running statistics.  References the removed
 * FFCodec/http_fifo API, so it no longer compiles as-is. */
static void write_packet(FFCodec *ffenc,
                         uint8_t *buf, int size)
{
    PacketHeader hdr;
    AVCodecContext *enc = &ffenc->enc;
    uint8_t *wptr;
    mk_header(&hdr, enc, size);
    wptr = http_fifo.wptr;
    fifo_write(&http_fifo, (uint8_t *)&hdr, sizeof(hdr), &wptr);
    fifo_write(&http_fifo, buf, size, &wptr);
    /* atomic modification of wptr */
    http_fifo.wptr = wptr;
    ffenc->data_count += size;
    /* exponential moving average of the frame size */
    ffenc->avg_frame_size = ffenc->avg_frame_size * AVG_COEF + size * (1.0 - AVG_COEF);
}
#endif
static void show_banner(void)
{
printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000-2003 Fabrice Bellard\n");
}
/* Print the banner followed by the command-line usage summary. */
static void show_help(void)
{
    show_banner();
    fputs("usage: ffserver [-L] [-h] [-f configfile]\n"
          "Hyper fast multi format Audio/Video streaming server\n"
          "\n"
          "-L : print the LICENSE\n"
          "-h : this help\n"
          "-f configfile : use configfile instead of /etc/ffserver.conf\n",
          stdout);
}
/* Print the banner followed by the LGPL license summary. */
static void show_license(void)
{
    show_banner();
    fputs("This library is free software; you can redistribute it and/or\n"
          "modify it under the terms of the GNU Lesser General Public\n"
          "License as published by the Free Software Foundation; either\n"
          "version 2 of the License, or (at your option) any later version.\n"
          "\n"
          "This library is distributed in the hope that it will be useful,\n"
          "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
          "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
          "Lesser General Public License for more details.\n"
          "\n"
          "You should have received a copy of the GNU Lesser General Public\n"
          "License along with this library; if not, write to the Free Software\n"
          "Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n",
          stdout);
}
/* SIGCHLD handler: reap exited feed children and schedule restarts.
 *
 * NOTE(review): fprintf() and time() are not async-signal-safe; calling
 * them from a signal handler is technically unsafe — confirm acceptable.
 * NOTE(review): the loop follows ->next (the global stream-list link)
 * rather than ->next_feed; presumably intentional because feeds are also
 * members of the stream list, but verify. */
static void handle_child_exit(int sig)
{
    pid_t pid;
    int status;
    /* reap every child that has exited without blocking */
    while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
        FFStream *feed;
        for (feed = first_feed; feed; feed = feed->next) {
            if (feed->pid == pid) {
                /* seconds the child ran before exiting */
                int uptime = time(0) - feed->pid_start;
                feed->pid = 0;
                fprintf(stderr, "%s: Pid %d exited with status %d after %d seconds\n", feed->filename, pid, status, uptime);
                if (uptime < 30) {
                    /* Turn off any more restarts */
                    feed->child_argv = 0;
                }
            }
        }
    }
    /* main loop will respawn children whose argv is still set */
    need_to_start_children = 1;
}
/* ffserver entry point: parse options and the config file, build the
 * stream/feed tables, optionally daemonize, then run the HTTP server
 * loop.  Returns 0 on clean shutdown; exits non-zero on fatal errors. */
int main(int argc, char **argv)
{
    const char *config_filename;
    int c;
    struct sigaction sigact;
    av_register_all();
    config_filename = "/etc/ffserver.conf";
    my_program_name = argv[0];
    my_program_dir = getcwd(0, 0);
    ffserver_daemon = 1;
    /* command line options */
    for(;;) {
        c = getopt(argc, argv, "ndLh?f:");
        if (c == -1)
            break;
        switch(c) {
        case 'L':
            show_license();
            exit(1);
        case '?':
        case 'h':
            show_help();
            exit(1);
        case 'n':
            /* do not launch feed children */
            no_launch = 1;
            break;
        case 'd':
            /* debug mode implies staying in the foreground */
            ffserver_debug = 1;
            ffserver_daemon = 0;
            break;
        case 'f':
            config_filename = optarg;
            break;
        default:
            exit(2);
        }
    }
    putenv("http_proxy"); /* Kill the http_proxy */
    srandom(gettime_ms() + (getpid() << 16));
    /* address on which the server will handle HTTP connections */
    my_http_addr.sin_family = AF_INET;
    my_http_addr.sin_port = htons (8080);
    my_http_addr.sin_addr.s_addr = htonl (INADDR_ANY);
    /* address on which the server will handle RTSP connections */
    my_rtsp_addr.sin_family = AF_INET;
    my_rtsp_addr.sin_port = htons (5454);
    my_rtsp_addr.sin_addr.s_addr = htonl (INADDR_ANY);
    nb_max_connections = 5;
    max_bandwidth = 1000;
    first_stream = NULL;
    logfilename[0] = '\0';
    /* install the SIGCHLD handler before any child may be spawned */
    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = handle_child_exit;
    sigact.sa_flags = SA_NOCLDSTOP | SA_RESTART;
    sigaction(SIGCHLD, &sigact, 0);
    if (parse_ffconfig(config_filename) < 0) {
        fprintf(stderr, "Incorrect config file - exiting.\n");
        exit(1);
    }
    build_file_streams();
    build_feed_streams();
    compute_bandwidth();
    /* put the process in background and detach it from its TTY */
    if (ffserver_daemon) {
        int pid;
        pid = fork();
        if (pid < 0) {
            perror("fork");
            exit(1);
        } else if (pid > 0) {
            /* parent : exit */
            exit(0);
        } else {
            /* child */
            /* NOTE(review): chdir/open/dup return values are unchecked */
            setsid();
            chdir("/");
            close(0);
            /* fd 0 now refers to /dev/null */
            open("/dev/null", O_RDWR);
            if (strcmp(logfilename, "-") != 0) {
                /* keep stdout only when logging to '-' */
                close(1);
                dup(0);
            }
            close(2);
            dup(0);
        }
    }
    /* signal init */
    /* broken client connections must not kill the server */
    signal(SIGPIPE, SIG_IGN);
    /* open log file if needed */
    if (logfilename[0] != '\0') {
        if (!strcmp(logfilename, "-"))
            logfile = stdout;
        else
            logfile = fopen(logfilename, "w");
    }
    if (http_server() < 0) {
        fprintf(stderr, "Could not start server\n");
        exit(1);
    }
    return 0;
}
| sofian/drone | lib/ffmpeg/ffserver.c | C | gpl-2.0 | 152,555 |
/*
* arch/arm/mach-tegra/baseband-xmm-power.c
*
* Copyright (C) 2011 NVIDIA Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/wakelock.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <mach/usb_phy.h>
#include "board.h"
#include "devices.h"
#include <mach/board_htc.h>
#include <linux/pm_qos_params.h>
#include <asm/mach-types.h>
#include "gpio-names.h"
#include "baseband-xmm-power.h"
MODULE_LICENSE("GPL");
unsigned long modem_ver = XMM_MODEM_VER_1130;
/*
* HTC: version history
*
* v04 - bert_lin - 20111025
* 1. remove completion & wait for probe race, use nv solution instead
* 2. add a attribute for host usb debugging
* v05 - bert_lin - 20111026
* 1. sync patch from nv michael. re-arrange the first_time var
* after flight off, device cant goes to L2 suspend
* 2. modify the files to meet the coding style
* v06 - bert_lin - 20111026
* 1. item 12: L0 -> flight -> suspend fail because of wakelock holding
* check wakelock in L3 and release it if neccessary
* v07 - bert_lin - 20111104
* workaround for item 18, AP L2->L0 fail! submit urb return -113
* add more logs on usb_chr for modem download issue
* v08 - bert_lin - 20111125
* workaround, origin l3 -> host_wake -> deepsleep
* after: L3 -> host_wakeup -> noirq suspend fail -> resume
* v09 - bert_lin - 20111214
* autopm
* v10 - bert_lin - 20111226
* log reduce
*/
/* HTC: macro, variables */
#include <mach/htc_hostdbg.h>
#define MODULE_NAME "[XMM_v15]"
unsigned int host_dbg_flag = 0;
EXPORT_SYMBOL(host_dbg_flag);
/* HTC: provide interface for user space to enable usb host debugging */
static ssize_t host_dbg_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t host_dbg_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size);
static DEVICE_ATTR(host_dbg, 0664, host_dbg_show, host_dbg_store);
/* HTC: Create attribute for host debug purpose */
/* sysfs read handler: report the current host_dbg_flag as hex. */
static ssize_t host_dbg_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%x\n", host_dbg_flag);
}
/**
 * HTC: get the runtime debug flags from user.
 *
 * Accepts a hex string such as "0x1f" (at most "0x" + 8 hex chars plus
 * the trailing newline).  Oversized input is ignored but still fully
 * consumed so the writer does not loop.
 *
 * @buf: user string
 * @size: user string length plus one 0x0a char
 */
static ssize_t host_dbg_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	const size_t hedge = 2 + 8 + 1;
	/* hedge: "0x" 2 chars, max: 8 chars, plus one 0x0a char */

	/* %zu: size is size_t (the old %d was a format mismatch) */
	pr_info(MODULE_NAME "%s size = %zu\n", __func__, size);
	if (size > hedge) {
		pr_info(MODULE_NAME "%s size > hedge:%zu, return\n",
			__func__, hedge);
		return size;
	}

	host_dbg_flag = simple_strtoul(buf, NULL, 16);
	pr_info(MODULE_NAME "%s set host_dbg_flag as 0x%08x\n",
		__func__, host_dbg_flag);
	return size;
}
/*============================================================*/
/* CPU-frequency floor request held while the modem boots. */
struct pm_qos_request_list modem_boost_cpu_freq_req;
EXPORT_SYMBOL_GPL(modem_boost_cpu_freq_req);
#define BOOST_CPU_FREQ_MIN 1500000
EXPORT_SYMBOL(modem_ver);
unsigned long modem_flash;
EXPORT_SYMBOL(modem_flash);
unsigned long modem_pm = 1;
EXPORT_SYMBOL(modem_pm);
unsigned long autosuspend_delay = 3000; /* 3000 msec */
EXPORT_SYMBOL(autosuspend_delay);
unsigned long enum_delay_ms = 1000; /* ignored if !modem_flash */
module_param(modem_ver, ulong, 0644);
MODULE_PARM_DESC(modem_ver,
	"baseband xmm power - modem software version");
module_param(modem_flash, ulong, 0644);
MODULE_PARM_DESC(modem_flash,
	"baseband xmm power - modem flash (1 = flash, 0 = flashless)");
module_param(modem_pm, ulong, 0644);
MODULE_PARM_DESC(modem_pm,
	"baseband xmm power - modem power management (1 = pm, 0 = no pm)");
module_param(enum_delay_ms, ulong, 0644);
MODULE_PARM_DESC(enum_delay_ms,
	"baseband xmm power - delay in ms between modem on and enumeration");
module_param(autosuspend_delay, ulong, 0644);
MODULE_PARM_DESC(autosuspend_delay, "baseband xmm power - autosuspend delay for autopm");
/*
 * auto_sleep(x) - delay for x milliseconds from any context.
 *
 * Busy-waits with mdelay() in atomic/interrupt context (where sleeping
 * is forbidden) and sleeps with msleep() otherwise.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement; the old bare if/else form broke inside un-braced
 * if/else bodies at the call site (dangling-else hazard).
 */
#define auto_sleep(x) \
	do { \
		if (in_interrupt() || in_atomic()) \
			mdelay(x); \
		else \
			msleep(x); \
	} while (0)
static bool short_autosuspend;
static int short_autosuspend_delay = 100;
static struct usb_device_id xmm_pm_ids[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID),
.driver_info = 0 },
{}
};
//for power on modem
static struct gpio tegra_baseband_gpios[] = {
{ -1, GPIOF_OUT_INIT_LOW, "BB_RSTn" },
{ -1, GPIOF_OUT_INIT_LOW, "BB_ON" },
{ -1, GPIOF_OUT_INIT_LOW, "IPC_BB_WAKE" },
{ -1, GPIOF_IN, "IPC_AP_WAKE" },
{ -1, GPIOF_OUT_INIT_HIGH, "IPC_HSIC_ACTIVE" },
{ -1, GPIOF_OUT_INIT_LOW, "IPC_HSIC_SUS_REQ" },
{ -1, GPIOF_OUT_INIT_LOW, "BB_VDD_EN" },
{ -1, GPIOF_OUT_INIT_LOW, "AP2BB_RST_PWRDWNn" },
{ -1, GPIOF_IN, "BB2AP_RST2" },
};
/*HTC*/
//for power consumation , power off modem
static struct gpio tegra_baseband_gpios_power_off_modem[] = {
{ -1, GPIOF_OUT_INIT_LOW, "BB_RSTn" },
{ -1, GPIOF_OUT_INIT_LOW, "BB_ON" },
{ -1, GPIOF_OUT_INIT_LOW, "IPC_BB_WAKE" },
{ -1, GPIOF_OUT_INIT_LOW, "IPC_AP_WAKE" },
{ -1, GPIOF_OUT_INIT_LOW, "IPC_HSIC_ACTIVE" },
{ -1, GPIOF_OUT_INIT_LOW, "IPC_HSIC_SUS_REQ" },
{ -1, GPIOF_OUT_INIT_LOW, "BB_VDD_EN" },
{ -1, GPIOF_OUT_INIT_LOW, "AP2BB_RST_PWRDWNn" },
{ -1, GPIOF_OUT_INIT_LOW, "BB2AP_RST2" },
};
static enum {
IPC_AP_WAKE_UNINIT,
IPC_AP_WAKE_IRQ_READY,
IPC_AP_WAKE_INIT1,
IPC_AP_WAKE_INIT2,
IPC_AP_WAKE_L,
IPC_AP_WAKE_H,
} ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
enum baseband_xmm_powerstate_t baseband_xmm_powerstate;
static struct workqueue_struct *workqueue;
static struct work_struct init1_work;
static struct work_struct init2_work;
static struct work_struct L2_resume_work;
//static struct delayed_work init4_work;
static struct baseband_power_platform_data *baseband_power_driver_data;
static int waiting_falling_flag = 0;
static bool register_hsic_device;
static struct wake_lock wakelock;
static struct usb_device *usbdev;
static bool CP_initiated_L2toL0;
static bool modem_power_on;
static bool first_time = true;
static int power_onoff;
static void baseband_xmm_power_L2_resume(void);
static DEFINE_MUTEX(baseband_xmm_onoff_lock);
#ifndef CONFIG_REMOVE_HSIC_L3_STATE
static int baseband_xmm_power_driver_handle_resume(
struct baseband_power_platform_data *data);
#endif
static bool wakeup_pending;
static int uart_pin_pull_state=1; // 1 for UART, 0 for GPIO
static bool modem_sleep_flag = false;
//static struct regulator *endeavor_dsi_reg = NULL;//for avdd_csi_dsi
static spinlock_t xmm_lock;
static bool system_suspending;
static int reenable_autosuspend; //ICS only
static int htcpcbid=0;
static struct workqueue_struct *workqueue_susp;
static struct work_struct work_shortsusp, work_defaultsusp;
static struct workqueue_struct *workqueue_debug;
static struct work_struct work_reset_host_active;
static int s_sku_id = 0;
static const int SKU_ID_ENRC2_GLOBAL = 0x00034600;
static const int SKU_ID_ENRC2_TMO = 0x00032900;
static const int SKU_ID_ENDEAVORU = 0x0002F300;
static struct kset *silent_reset_kset;
static struct kobject *silent_reset_kobj;
#ifndef MIN
/* Smallest of two ints; guarded because some headers define MIN already. */
static inline int MIN(int a, int b)
{
	return (a < b) ? a : b;
}
#endif
/*
 * sysfs write handler driving the modem debug helpers:
 *   "0" - dump the baseband GPIO states
 *   "1" - dump GPIOs and request a modem coredump
 *   "2" - request a silent modem reset
 * Any other input is ignored (logged only).
 */
ssize_t debug_handler(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	if (strncmp(buf, "0", MIN(count, strlen("0"))) == 0) {
		debug_gpio_dump();
	} else if (strncmp(buf, "1", MIN(count, strlen("1"))) == 0) {
		debug_gpio_dump();
		trigger_radio_fatal_get_coredump();
	} else if (strncmp(buf, "2", MIN(count, strlen("2"))) == 0) {
		trigger_silent_reset("From User Space");
	} else {
		pr_info("%s: do nothing\n", __func__);
	}

	return count;
}
EXPORT_SYMBOL_GPL(debug_handler);
/* Log one GPIO's current value together with a human-readable name. */
#define PRINT_GPIO(gpio,name) pr_info( "PRINT_GPIO %s <%d>", name, gpio_get_value(gpio) )
/*
 * Dump the state of every AP<->baseband control GPIO to the kernel log.
 * BB2AP_RST2 is skipped on Endeavoru, which has no such pin.
 * Always returns true.
 */
int debug_gpio_dump()
{
	PRINT_GPIO( TEGRA_GPIO_PM4, "BB_VDD_EN" );
	PRINT_GPIO( TEGRA_GPIO_PC1, "AP2BB_RST_PWRDWNn" );
	PRINT_GPIO( TEGRA_GPIO_PN0, "AP2BB_RSTn" );
	PRINT_GPIO( TEGRA_GPIO_PN3, "AP2BB_PWRON" );
	PRINT_GPIO( TEGRA_GPIO_PN2, "BB2AP_RADIO_FATAL" );
	PRINT_GPIO( TEGRA_GPIO_PN1, "IPC_HSIC_ACTIVE" );
	PRINT_GPIO( TEGRA_GPIO_PV0, "HSIC_SUS_REQ" );
	PRINT_GPIO( TEGRA_GPIO_PC6, "IPC_BB_WAKE" );
	PRINT_GPIO( TEGRA_GPIO_PS2, "IPC_AP_WAKE" );
	if(SKU_ID_ENDEAVORU != s_sku_id)
	{
		PRINT_GPIO( TEGRA_GPIO_PS5, "BB2AP_RST2" );
	}
	return true;
}
EXPORT_SYMBOL_GPL(debug_gpio_dump);
/*
 * Would force a modem fatal (for coredump collection) by driving
 * BB2AP_SUSPEND_REQ high and toggling host_active; that path is
 * compiled out (#if 0), so today this only logs a message.
 * Always returns 0.
 */
int trigger_radio_fatal_get_coredump(void)
{
#if 0
	if (!reason)
		reason = "No Reason";
	pr_info("Trigger Modem Fatal!! reason <%s>", reason);
	/*set BB2AP_SUSPEND_REQ Pin (TEGRA_GPIO_PV0) to OutPut High to trigger Modem fatal*/
	int ret=gpio_direction_output(TEGRA_GPIO_PV0,1);
	if (ret < 0)
		pr_err("%s: set BB2AP_SUSPEND_REQ Pin to Output error", __func__);
	/* reset HOST_ACTIVE to notify modem since suspend req is not a wakeup source of modem. */
	queue_work( workqueue_debug, &work_reset_host_active );
#else
	pr_info("Didn't trigger fatal for better user experience");
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(trigger_radio_fatal_get_coredump);
/*
 * Send a KOBJ_ADD uevent carrying "ResetReason=<reason>" so user space
 * can perform a silent modem reset.
 *
 * @reason: why the reset was requested; may be NULL ("No Reason").
 *          Truncated to fit the fixed-size message buffer.
 *
 * Returns 0 (logs an error if the sysfs kobject is not set up yet).
 */
int trigger_silent_reset(char *reason)
{
#define MSIZE 30
	char message[MSIZE] = "ResetReason=";
	char *envp[] = { message, NULL };
	/* room left in message, excluding the terminating NUL */
	int left_size = MSIZE - 1 - strlen(message);

	if (!reason)
		reason = "No Reason";
	/* cast: strlen() is size_t but MIN() takes ints */
	strncat(message, reason, MIN((int)strlen(reason), left_size));
	pr_info("%s: message<%s>", __func__, message);

	if (silent_reset_kobj)
	{
		kobject_uevent_env( silent_reset_kobj, KOBJ_ADD, envp);
	}
	else
	{
		pr_err("%s: kobj is NULL.", __func__);
	}
	return 0;
/* don't leak the helper macro into the rest of the file */
#undef MSIZE
}
EXPORT_SYMBOL_GPL(trigger_silent_reset);
static DEVICE_ATTR(debug_handler, S_IRUSR | S_IWUSR | S_IRGRP, NULL, debug_handler);
/*
 * True when this SKU (ENRC2 T-Mobile) carries the XMM6360 baseband.
 * (void) gives a proper prototype; the old empty () declared an
 * unprototyped function.
 */
int Modem_is_6360(void)
{
	return s_sku_id == SKU_ID_ENRC2_TMO;
}
EXPORT_SYMBOL_GPL(Modem_is_6360);
/*
 * True when this SKU (ENRC2 global or Endeavoru) carries the XMM6260
 * baseband.  (void) gives a proper prototype; the old empty () declared
 * an unprototyped function.
 */
int Modem_is_6260(void)
{
	return ( s_sku_id == SKU_ID_ENRC2_GLOBAL || s_sku_id == SKU_ID_ENDEAVORU );
}
EXPORT_SYMBOL_GPL(Modem_is_6260);
/* True on any board carrying an IMC modem (ENRC2B / Endeavoru / ENRC2U). */
int Modem_is_IMC(void)
{
	if (machine_is_enrc2b())
		return 1;
	if (machine_is_endeavoru())
		return 1;
	return machine_is_enrc2u();
}
EXPORT_SYMBOL_GPL(Modem_is_IMC);
/*
 * IRQ handler for BB2AP_RST2 (falling edge): the modem reset itself
 * unexpectedly.  Log it and dump the baseband GPIO state for debugging.
 */
static irqreturn_t radio_reset_irq(int irq, void *dev_id)
{
	pr_err("%s: Radio reset detected!", __func__);
	debug_gpio_dump();
	return IRQ_HANDLED;
}
#if 0
int enable_avdd_dsi_csi_power()
{
pr_info(MODULE_NAME "[xmm]%s\n",__func__);
int ret=0;
if (endeavor_dsi_reg == NULL) {
endeavor_dsi_reg = regulator_get(NULL, "avdd_dsi_csi");
pr_info(MODULE_NAME "[xmm]%s regulator_getED\n",__func__);
if (IS_ERR_OR_NULL(endeavor_dsi_reg)) {
pr_err("dsi: Could not get regulator avdd_dsi_csi\n");
endeavor_dsi_reg = NULL;
return PTR_ERR(endeavor_dsi_reg);
}
}
ret = regulator_enable(endeavor_dsi_reg);
if (ret < 0) {
printk(KERN_ERR
"DSI regulator avdd_dsi_csi couldn't be enabled\n",ret);
}
return ret;
}
int disable_avdd_dsi_csi_power()
{
pr_info(MODULE_NAME "[xmm]%s\n",__func__);
int ret=0;
if (endeavor_dsi_reg == NULL) {
endeavor_dsi_reg = regulator_get(NULL, "avdd_dsi_csi");
pr_info(MODULE_NAME "[xmm]%s regulator_getED\n",__func__);
if (IS_ERR_OR_NULL(endeavor_dsi_reg)) {
pr_err("dsi: Could not get regulator avdd_dsi_csi\n");
endeavor_dsi_reg = NULL;
return PTR_ERR(endeavor_dsi_reg);
}
}
ret = regulator_disable(endeavor_dsi_reg);
if (ret < 0) {
printk(KERN_ERR
"DSI regulator avdd_dsi_csi couldn't be disabled\n",ret);
}
endeavor_dsi_reg=NULL;
return ret;
}
#endif
/*
 * Re-apply direction/initial level to one already-requested GPIO.
 * @flags uses the gpio_request_array() GPIOF_* convention; @label is
 * accepted for symmetry with the request helpers but not used here.
 * Returns 0 or the gpiolib error code.
 */
int gpio_config_only_one(unsigned gpio, unsigned long flags, const char *label)
{
	if (flags & GPIOF_DIR_IN)
		return gpio_direction_input(gpio);

	return gpio_direction_output(gpio,
		(flags & GPIOF_INIT_HIGH) ? 1 : 0);
}
/*
 * Re-apply direction/level for an array of GPIO descriptors; entries
 * with gpio == -1 are placeholders and skipped.
 *
 * Stops at the first failure and returns that gpiolib error.  The pins
 * deliberately stay requested on failure (the old err_free label had
 * its gpio_free() unwind commented out); the power-off path relies on
 * the GPIOs remaining owned.
 *
 * Returns 0 on success.
 */
int gpio_config_only_array(struct gpio *array, size_t num)
{
	size_t i;	/* size_t: matches num (old int i was a signed/unsigned mix) */
	int err;

	for (i = 0; i < num; i++, array++) {
		if (array->gpio == -1)
			continue;
		err = gpio_config_only_one(array->gpio, array->flags,
					   array->label);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Request a single GPIO with the given label.
 * Returns 0 on success or the gpio_request() error code.
 * (The old "if (err) return err; else return 0;" was just "return err".)
 */
int gpio_request_only_one(unsigned gpio, const char *label)
{
	return gpio_request(gpio, label);
}
/*
 * Request an array of GPIO descriptors; entries with gpio == -1 are
 * placeholders and skipped.  On failure every entry requested so far
 * is released before returning the error.
 *
 * Fixes vs. the old version: loop index is size_t (no signed/unsigned
 * comparison with num), and the unwind no longer calls gpio_free() on
 * skipped -1 placeholder entries.
 */
int gpio_request_only_array(struct gpio *array, size_t num)
{
	size_t i;
	int err = 0;

	for (i = 0; i < num; i++, array++) {
		if (array->gpio != -1) {
			err = gpio_request_only_one(array->gpio, array->label);
			if (err)
				goto err_free;
		}
	}
	return 0;

err_free:
	/* unwind: release only the real pins requested before the failure */
	while (i--) {
		--array;
		if (array->gpio != -1)
			gpio_free(array->gpio);
	}
	return err;
}
/*
 * Drive one UART pad low as a plain GPIO output (modem-off leakage
 * workaround).  On failure the GPIO is freed and the gpiolib error is
 * returned; on success the pad is switched to GPIO mode and exported
 * to sysfs.
 */
static int gpio_o_l_uart(int gpio, char *name)
{
	int rc;

	pr_info(MODULE_NAME "%s ,name=%s gpio=%d\n", __func__, name, gpio);

	rc = gpio_direction_output(gpio, 0);
	if (rc < 0) {
		pr_err(" %s: gpio_direction_output failed %d\n", __func__, rc);
		gpio_free(gpio);
		return rc;
	}

	tegra_gpio_enable(gpio);
	gpio_export(gpio, true);
	return 0;
}
/*
 * Hand the four UART pads back to the UART controller (SFIO mode) once
 * the modem is powered.  No-op when the pads are already in UART mode.
 */
void modem_on_for_uart_config(void)
{
	pr_info(MODULE_NAME "%s ,first_time=%s uart_pin_pull_low=%d\n",
		__func__, first_time ? "true" : "false", uart_pin_pull_state);

	if (uart_pin_pull_state != 0)
		return;

	/* pads were pulled low as GPIOs; restore SFIO (UART) function */
	pr_info(MODULE_NAME "%s tegra_gpio_disable for UART\n", __func__);
	tegra_gpio_disable(TEGRA_GPIO_PJ7);
	tegra_gpio_disable(TEGRA_GPIO_PK7);
	tegra_gpio_disable(TEGRA_GPIO_PB0);
	tegra_gpio_disable(TEGRA_GPIO_PB1);
	uart_pin_pull_state = 1;	/* back to UART mode */
}
/*
 * Pull the four UART pads low as GPIOs while the modem is off to avoid
 * current leakage.  All four pads are attempted even if one fails, and
 * the FIRST error is returned (the old code overwrote err on every
 * call, so only a failure of the last pad was ever reported).
 * No-op when the pads are already in GPIO mode.
 */
int modem_off_for_uart_config(void)
{
	int err = 0, rc;

	pr_info(MODULE_NAME "%s uart_pin_pull_low=%d\n",
		__func__, uart_pin_pull_state);

	if (uart_pin_pull_state == 1) {
		/* pads still in UART mode: pull them low and take them as GPIOs */
		rc = gpio_o_l_uart(TEGRA_GPIO_PJ7, "IMC_UART_TX");
		if (rc && !err)
			err = rc;
		rc = gpio_o_l_uart(TEGRA_GPIO_PK7, "IMC_UART_RTS");
		if (rc && !err)
			err = rc;
		rc = gpio_o_l_uart(TEGRA_GPIO_PB0, "IMC_UART_RX");
		if (rc && !err)
			err = rc;
		rc = gpio_o_l_uart(TEGRA_GPIO_PB1, "IMC_UART_CTS");
		if (rc && !err)
			err = rc;
		uart_pin_pull_state = 0;	/* now in GPIO mode */
	}
	return err;
}
/*
 * Reconfigure the baseband control GPIOs for the modem-off
 * (low-leakage) state.
 *
 * NOTE(review): the @array/@num parameters are ignored — the function
 * always operates on the static tegra_baseband_gpios_power_off_modem
 * table.  Confirm callers before relying on the arguments.
 *
 * Returns 0 on success, -ENODEV on any gpiolib error.
 */
int modem_off_for_usb_config(struct gpio *array, size_t num)
{
	//pr_info(MODULE_NAME "%s 1219_01\n", __func__);
	int err=0;
	err = gpio_config_only_array(tegra_baseband_gpios_power_off_modem,
		ARRAY_SIZE(tegra_baseband_gpios_power_off_modem));
	if (err < 0) {
		pr_err("%s - gpio_config_array gpio(s) for modem off failed\n", __func__);
		return -ENODEV;
	}
	return err;
}
#if 0
int modem_on_for_usb_config(struct gpio *array, size_t num)
{
pr_info(MODULE_NAME "%s \n", __func__);
int err=0;
err = gpio_config_only_array(tegra_baseband_gpios,
ARRAY_SIZE(tegra_baseband_gpios));
if (err < 0) {
pr_err("%s - gpio_config_array gpio(s) for modem off failed\n", __func__);
return -ENODEV;
}
return err;
}
#endif
/*
 * Put every AP<->baseband interface pin — the control GPIOs and the
 * four UART pads — into its low-leakage modem-off configuration.
 * Returns 0 on success or -ENODEV if either step fails.
 */
int config_gpio_for_power_off(void)
{
	int err=0;
	pr_info(MODULE_NAME "%s for power consumation 4st \n", __func__);
#if 1
	/* config baseband gpio(s) for modem off */
	err = modem_off_for_usb_config(tegra_baseband_gpios_power_off_modem,
		ARRAY_SIZE(tegra_baseband_gpios_power_off_modem));
	if (err < 0) {
		pr_err("%s - gpio_config_array gpio(s) for modem off failed\n", __func__);
		return -ENODEV;
	}
#endif
	/* config uart gpio(s) for modem off */
	err=modem_off_for_uart_config();
	if (err < 0) {
		pr_err("%s - modem_off_for_uart_config gpio(s)\n", __func__);
		return -ENODEV;
	}
	return err;
}
#if 0
int config_gpio_for_power_on()
{
int err=0;
pr_info(MODULE_NAME "%s for power consumation 4st \n", __func__);
#if 1
/* config baseband gpio(s) for modem off */
err = modem_on_for_usb_config(tegra_baseband_gpios,
ARRAY_SIZE(tegra_baseband_gpios));
if (err < 0) {
pr_err("%s - gpio_config_array gpio(s) for modem off failed\n", __func__);
return -ENODEV;
}
#endif
/* config uart gpio(s) for modem off */
modem_on_for_uart_config();
return err;
}
#endif
/*HTC--*/
extern void platfrom_set_flight_mode_onoff(bool mode_on);
/*
 * Execute the XMM power-on / reset GPIO sequence.
 *
 * May run in atomic context, hence auto_sleep()/udelay() for all timed
 * steps.  Sequence: VDD on -> BB_RST low ~7 ms -> AP2BB_RST_PWRDWNn
 * high ~25 ms -> BB_RST high ~40 ms -> 60 us BB_ON pulse.  On ENRC2
 * boards BB2AP_RST2 is then polled to confirm the modem reached its
 * boot ROM (power-leakage workaround).
 *
 * Returns 0 on success, -1 when @data is NULL.
 */
static int baseband_modem_power_on(struct baseband_power_platform_data *data)
{
	/* HTC: called in atomic context */
	int ret=0, i=0;
	pr_info("%s VP: 07/05 22.52{\n", __func__);
	if (!data) {
		pr_err("%s: data is NULL\n", __func__);
		return -1;
	}
	/* reset / power on sequence */
	gpio_set_value(data->modem.xmm.bb_vdd_en, 1); /* give modem power */
	auto_sleep(1);
	gpio_set_value(data->modem.xmm.bb_rst, 0); /* set to low first */
	//pr_info("%s(%d)\n", __func__, __LINE__);
	for (i = 0; i < 7; i++) /* 5 ms BB_RST low */
		udelay(1000);
	ret = gpio_get_value(data->modem.xmm.bb_rst_pwrdn);
	//pr_info("%s(%d) get AP2BB_RST_PWRDWNn=%d \n", __func__, __LINE__, ret);
	//pr_info("%s(%d) set AP2BB_RST_PWRDWNn=1\n", __func__, __LINE__);
	gpio_set_value(data->modem.xmm.bb_rst_pwrdn, 1); /* 20 ms RST_PWRDWNn high */
	auto_sleep(25); /* need 20 but 40 is more safe */ //steven markded
	//pr_info("%s(%d) set modem.xmm.bb_rst=1\n", __func__, __LINE__);
	gpio_set_value(data->modem.xmm.bb_rst, 1); /* 1 ms BB_RST high */
	auto_sleep(40); /* need 20 but 40 is more safe */
	/* Use RST2 to identify if modem is powered on into boot rom. */
	/* Fix issue of power leakage in ENRC2. */
	if (machine_is_enrc2b() || machine_is_enrc2u())
	{
		gpio_direction_input(data->modem.xmm.bb_rst2);
	}
	gpio_direction_input(data->modem.xmm.ipc_ap_wake);
	gpio_direction_input(TEGRA_GPIO_PN2);
	//pr_info("%s(%d) set modem.xmm.bb_on=1 duration is 60us\n", __func__, __LINE__);
	gpio_set_value(data->modem.xmm.bb_on, 1); /* power on sequence */
	udelay(60);
	gpio_set_value(data->modem.xmm.bb_on, 0);
	//pr_info("%s(%d) set modem.xmm.bb_on=0\n", __func__, __LINE__);
	auto_sleep(10);
	//pr_info("%s:VP pm qos request CPU 1.5GHz\n", __func__);
	//pm_qos_update_request(&modem_boost_cpu_freq_req, (s32)BOOST_CPU_FREQ_MIN);
	/* Use RST2 to identify if modem is powered on into boot rom. */
	/* Fix issue of power leakage in ENRC2. */
	if (machine_is_enrc2b() || machine_is_enrc2u())
	{
		int counter = 0;
		const int max_retry = 10;
		/* poll BB2AP_RST2 up to ~30 ms for the boot-ROM indication */
		while (!gpio_get_value(data->modem.xmm.bb_rst2) && counter < max_retry)
		{
			counter++;
			mdelay(3);
		}
		if(counter == max_retry)
			pr_info("%s: Wait BB2AP_RST2 timeout.", __func__);
	}
	gpio_direction_output(data->modem.xmm.ipc_hsic_active, 1);
	/* restore the UART pads now that the modem is powered */
	modem_on_for_uart_config();
	pr_info("%s }\n", __func__);
	return 0;
}
/*
 * Bring the modem up from BBXMM_PS_UNINIT: reset the power-state
 * machine, register the HSIC host controller (first boot) or register
 * it and power the modem directly (later boots), hook the ENRC2
 * radio-reset IRQ, and arm the wakeup IRQs.
 *
 * Only valid in BBXMM_PS_UNINIT; returns -EINVAL otherwise or when
 * platform data is missing, 0 on success.
 */
static int baseband_xmm_power_on(struct platform_device *device)
{
	struct baseband_power_platform_data *data
		= (struct baseband_power_platform_data *)
			device->dev.platform_data;
	int ret; /* HTC: ENR#U wakeup src fix */
	//int value;
	pr_info(MODULE_NAME "%s{\n", __func__);
	/* check for platform data */
	if (!data) {
		pr_err("%s: !data\n", __func__);
		return -EINVAL;
	}
	if (baseband_xmm_powerstate != BBXMM_PS_UNINIT) {
		pr_err("%s: baseband_xmm_powerstate != BBXMM_PS_UNINIT\n",
			__func__);
		return -EINVAL;
	}
#if 0 /*HTC*/
	pr_info(MODULE_NAME " htc_get_pcbid_info= %d\n",htcpcbid );
	if(htcpcbid < PROJECT_PHASE_XE) {
		enable_avdd_dsi_csi_power();
	}
#endif
	/* reset the state machine */
	baseband_xmm_powerstate = BBXMM_PS_INIT;
	first_time = true;
	modem_sleep_flag = false;
	/* HTC use IPC_AP_WAKE_INIT2 */
	if (modem_ver < XMM_MODEM_VER_1130)
		ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
	else
		ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
	/* pr_info("%s - %d\n", __func__, __LINE__); */
	/* register usb host controller */
	if (!modem_flash) {
		/* pr_info("%s - %d\n", __func__, __LINE__); */
		/* register usb host controller only once */
		if (register_hsic_device) {
			pr_info("%s(%d)register usb host controller\n", __func__, __LINE__);
			/* defer the power-on: it happens when the HSIC
			 * host is up (modem_power_on consumed in L0) */
			modem_power_on = true;
			if (data->hsic_register)
				data->modem.xmm.hsic_device =
					data->hsic_register();
			else
				pr_err("%s: hsic_register is missing\n",
					__func__);
			register_hsic_device = false;
		} else {
			/* register usb host controller */
			if (data->hsic_register)
				data->modem.xmm.hsic_device =
					data->hsic_register();
			/* turn on modem */
			pr_info("%s call baseband_modem_power_on\n", __func__);
			baseband_modem_power_on(data);
		}
	}
	if (machine_is_enrc2b() || machine_is_enrc2u())
	{
		pr_info("%s: register BB2AP_RST2 handler", __func__);
		ret = request_irq( gpio_to_irq(TEGRA_GPIO_PS5),
			radio_reset_irq,
			IRQF_TRIGGER_FALLING,
			"RADIO_RESET",
			NULL );
		if (ret < 0)
			pr_err("%s: register BB2AP_RST2 handler err <%d>", __func__, ret);
	}
	/* arm wakeup sources: AP_WAKE, radio-fatal, (ENRC2) radio-reset */
	pr_info("%s: before enable irq wake", __func__);
	ret = enable_irq_wake(gpio_to_irq(data->modem.xmm.ipc_ap_wake));
	if (ret < 0)
		pr_err("%s: enable_irq_wake ap_wake err <%d>", __func__, ret);
	ret = enable_irq_wake(gpio_to_irq(TEGRA_GPIO_PN2));
	if (ret < 0)
		pr_err("%s: enable_irq_wake radio_fatal err <%d>", __func__, ret);
	if (machine_is_enrc2b() || machine_is_enrc2u())
	{
		ret = enable_irq_wake(gpio_to_irq(TEGRA_GPIO_PS5));
		if (ret < 0)
			pr_err("%s: enable_irq_wake radio_reset err <%d>", __func__, ret);
	}
	pr_info("%s }\n", __func__);
	return 0;
}
/*
 * Power down the XMM baseband and return the driver to BBXMM_PS_UNINIT.
 *
 * Sequence: disarm wake IRQs, free the ENRC2 radio-reset IRQ,
 * unregister the HSIC host controller, drop IPC_HSIC_ACTIVE, cut modem
 * power (OEM1), reconfigure every AP<->BB pin for low leakage, then
 * clear the wakeup bookkeeping under xmm_lock.
 *
 * Fixes vs. the old version: the error message for the bad-state check
 * matched the opposite condition; free_irq() returns void, so the
 * stale-`ret` check after it was dead code; the hsic_device pointer is
 * now printed with %p instead of a cast to unsigned int.
 *
 * Returns 0 on success, -EINVAL on bad state / missing device or data.
 */
static int baseband_xmm_power_off(struct platform_device *device)
{
	struct baseband_power_platform_data *data;
	int ret; /* HTC: ENR#U wakeup src fix */
	unsigned long flags;

	pr_info("%s {\n", __func__);

	if (baseband_xmm_powerstate == BBXMM_PS_UNINIT) {
		pr_err("%s: baseband_xmm_powerstate == BBXMM_PS_UNINIT\n",
			__func__);
		return -EINVAL;
	}
	/* check for device / platform data */
	if (!device) {
		pr_err("%s: !device\n", __func__);
		return -EINVAL;
	}
	data = (struct baseband_power_platform_data *)
		device->dev.platform_data;
	if (!data) {
		pr_err("%s: !data\n", __func__);
		return -EINVAL;
	}

	ipc_ap_wake_state = IPC_AP_WAKE_UNINIT;
	/* Set this flag to have proper flash-less first enumearation */
	register_hsic_device = true;

	/* disarm the wakeup sources armed in baseband_xmm_power_on() */
	pr_info("%s: before disable irq wake", __func__);
	ret = disable_irq_wake(gpio_to_irq(data->modem.xmm.ipc_ap_wake));
	if (ret < 0)
		pr_err("%s: disable_irq_wake ap_wake err <%d>", __func__, ret);
	ret = disable_irq_wake(gpio_to_irq(TEGRA_GPIO_PN2));
	if (ret < 0)
		pr_err("%s: disable_irq_wake radio_fatal err <%d>", __func__, ret);
	if (machine_is_enrc2b() || machine_is_enrc2u())
	{
		ret = disable_irq_wake(gpio_to_irq(TEGRA_GPIO_PS5));
		if (ret < 0)
			pr_err("%s: disable_irq_wake radio_reset err <%d>", __func__, ret);
		pr_info("%s: before free radio_reset irq", __func__);
		/* free_irq() returns void - nothing to check here */
		free_irq( gpio_to_irq(TEGRA_GPIO_PS5), NULL );
	}

	/* unregister usb host controller */
	pr_info("%s: hsic device: %p\n", __func__, data->modem.xmm.hsic_device);
	if (data->hsic_unregister && data->modem.xmm.hsic_device)
	{
		data->hsic_unregister(data->modem.xmm.hsic_device);
		data->modem.xmm.hsic_device = NULL;
	}
	else
		pr_err("%s: hsic_unregister is missing\n", __func__);

	/* set IPC_HSIC_ACTIVE low */
	gpio_set_value(baseband_power_driver_data->
		modem.xmm.ipc_hsic_active, 0);
	/* wait 20 ms */
	msleep(20);
	/* drive bb_rst low */
	//Sophia:0118:modem power down sequence: don't need to clear BB_RST
	//gpio_set_value(data->modem.xmm.bb_rst, 0);
#ifdef BB_XMM_OEM1
	msleep(20);
	/* turn off the modem power */
	gpio_set_value(baseband_power_driver_data->modem.xmm.bb_vdd_en, 0);
	msleep(68);//for IMC Modem discharge.
#else /* !BB_XMM_OEM1 */
	msleep(1);
#endif /* !BB_XMM_OEM1 */

	/* HTC: reconfigure all pins for minimal power consumption */
	pr_info("%s config_gpio_for_power_off\n", __func__);
	config_gpio_for_power_off();

	/* HTC: remove platfrom_set_flight_mode_onoff for ENR */
	/* platfrom_set_flight_mode_onoff(true); */
	baseband_xmm_powerstate = BBXMM_PS_UNINIT;
	modem_sleep_flag = false;
	CP_initiated_L2toL0 = false;
	spin_lock_irqsave(&xmm_lock, flags);
	wakeup_pending = false;
	system_suspending = false;
	spin_unlock_irqrestore(&xmm_lock, flags);
	register_hsic_device = true;	//start reg process again for xmm on

	/*set Radio fatal Pin to OutPut Low*/
	ret = gpio_direction_output(TEGRA_GPIO_PN2, 0);
	if (ret < 0)
		pr_err("%s: set Radio fatal Pin to Output error\n", __func__);
	/*set BB2AP_SUSPEND_REQ Pin (TEGRA_GPIO_PV0) to OutPut Low*/
	ret = gpio_direction_output(TEGRA_GPIO_PV0, 0);
	if (ret < 0)
		pr_err("%s: set BB2AP_SUSPEND_REQ Pin to Output error\n", __func__);
	pr_info("%s }\n", __func__);
	return 0;
}
/*
 * sysfs "xmm_onoff" store handler: "1" powers the modem on, "0" powers
 * it off.  Serialized by baseband_xmm_onoff_lock so on/off sequences
 * cannot interleave.
 *
 * Fixes vs. the old version: `size` was undeclared in !BB_XMM_OEM1
 * builds (its declaration had been commented out), breaking the
 * compile of that configuration; `count` (size_t) is printed with %zu.
 */
static ssize_t baseband_xmm_onoff(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
#ifndef BB_XMM_OEM1
	int size;
#endif
	struct platform_device *device = to_platform_device(dev);

	mutex_lock(&baseband_xmm_onoff_lock);

	/* check input */
	if (buf == NULL) {
		pr_err("%s: buf NULL\n", __func__);
		mutex_unlock(&baseband_xmm_onoff_lock);
		return -EINVAL;
	}

	/* parse input */
#ifdef BB_XMM_OEM1
	/* accept both the raw byte 0x01 and the ASCII character '1' */
	if (buf[0] == 0x01 || buf[0] == '1')
		power_onoff = 1;
	else
		power_onoff = 0;
#else /* !BB_XMM_OEM1 */
	size = sscanf(buf, "%d", &power_onoff);
	if (size != 1) {
		pr_err("%s: size=%d -EINVAL\n", __func__, size);
		mutex_unlock(&baseband_xmm_onoff_lock);
		return -EINVAL;
	}
#endif /* !BB_XMM_OEM1 */
	pr_info("%s power_onoff=%d count=%zu, buf[0]=0x%x\n",
		__func__, power_onoff, count, buf[0]);

	if (power_onoff == 0)
		baseband_xmm_power_off(device);
	else if (power_onoff == 1)
		baseband_xmm_power_on(device);

	mutex_unlock(&baseband_xmm_onoff_lock);
	return count;
}
static DEVICE_ATTR(xmm_onoff, S_IRUSR | S_IWUSR | S_IRGRP,
NULL, baseband_xmm_onoff);
/*
 * Central power-state machine.  Moves baseband_xmm_powerstate between
 * L0 (active), L2 (USB suspended), L3 (system suspend; compiled out
 * with CONFIG_REMOVE_HSIC_L3_STATE) and the transient L2TOL0, and
 * manages the wakelock that pins the system awake while in L0.
 * A request for the current state is a no-op.
 */
void baseband_xmm_set_power_status(unsigned int status)
{
	struct baseband_power_platform_data *data = baseband_power_driver_data;
	//int value = 0;
	unsigned long flags;
	if (baseband_xmm_powerstate == status)
		return;
	pr_info(MODULE_NAME"%s{ status=%d\n", __func__,status);
	switch (status) {
	case BBXMM_PS_L0:
		if (modem_sleep_flag) {
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
			pr_info("%s, resume to L0 with modem_sleep_flag", __func__ );
#else
			pr_info("%s Resume from L3 without calling resume function\n",  __func__);
			baseband_xmm_power_driver_handle_resume(data);
#endif
		}
		pr_info("L0\n");
		baseband_xmm_powerstate = status;
		/* HTC: don't hold the wakelock multiple times */
		if (!wake_lock_active(&wakelock)) {
			pr_info("%s: wake_lock [%s] in L0\n",
				__func__, wakelock.name);
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
			wake_lock_timeout(&wakelock, HZ*2);
#else
			wake_lock(&wakelock);
			//wake_lock_timeout(&wakelock, HZ * 5);
#endif
		}
		/* deferred first power-on (set in baseband_xmm_power_on) */
		if (modem_power_on) {
			modem_power_on = false;
			baseband_modem_power_on(data);
		}
		//pr_info("gpio host active high->\n");
		/* hack to restart autosuspend after exiting LP0
		 * (aka re-entering L0 from L3)
		 */
#if 0
		//remove on 0305
		if (usbdev) {
			struct usb_interface *intf;
			intf = usb_ifnum_to_if(usbdev, 0);
			//pr_info("%s - autopm_get - usbdev = %d - %d {\n", __func__, usbdev, __LINE__);
			//pr_info("%s: cnt %d intf=%p &intf->dev=%p kobje=%s\n",
			//__func__, atomic_read(&intf->dev.power.usage_count),intf,&intf->dev,kobject_name(&intf->dev.kobj));
			if (usb_autopm_get_interface_async(intf) >= 0) {
				pr_info("get_interface_async succeeded"
					" - call put_interface\n");
				//pr_info("%s - usb_put - usbdev = %d - %d {\n", __func__, usbdev, __LINE__);
				usb_autopm_put_interface_async(intf);
				//pr_info("%s - usb_put - usbdev = %d - %d {\n", __func__, usbdev, __LINE__);
			} else {
				pr_info("get_interface_async failed"
					" - do not call put_interface\n");
			}
		}
#endif
		break;
	case BBXMM_PS_L2:
		pr_info("L2 wake_unlock[%s]\n", wakelock.name);
		baseband_xmm_powerstate = status;
		spin_lock_irqsave(&xmm_lock, flags);
		if (wakeup_pending) {
			/* a CP wake raced the suspend: resume right away */
			spin_unlock_irqrestore(&xmm_lock, flags);
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
			pr_info("%s: wakeup pending\n", __func__);
#endif
			baseband_xmm_power_L2_resume();
		 } else {
			spin_unlock_irqrestore(&xmm_lock, flags);
			wake_unlock(&wakelock);
			modem_sleep_flag = true;
		}
		/* NOTE(review): `&usbdev->dev` is a non-NULL address even
		 * when usbdev is NULL - this likely meant to test `usbdev`;
		 * confirm before changing. */
		if (short_autosuspend && (&usbdev->dev)) {
			pr_info("autosuspend delay %d ms,disable short_autosuspend\n", (int)autosuspend_delay);
			queue_work(workqueue_susp, &work_defaultsusp);
			short_autosuspend = false;
		}
#if 0
		if (usbdev) {
			struct usb_interface *intf;
			intf = usb_ifnum_to_if(usbdev, 0);
			pr_info("%s: cnt %d intf=%p &intf->dev=%p kobje=%s\n",
			__func__, atomic_read(&intf->dev.power.usage_count),intf,&intf->dev,kobject_name(&intf->dev.kobj));
		}
#endif
		break;
#ifndef CONFIG_REMOVE_HSIC_L3_STATE
	case BBXMM_PS_L3:
		if (baseband_xmm_powerstate == BBXMM_PS_L2TOL0) {
			pr_info("%s: baseband_xmm_powerstate == BBXMM_PS_L2TOL0\n", __func__);
			if (!gpio_get_value(data->modem.xmm.ipc_ap_wake)) {
				/* CP asserted wake while we were heading to
				 * L3: remember it for the resume path */
				spin_lock_irqsave(&xmm_lock, flags);
				wakeup_pending = true;
				spin_unlock_irqrestore(&xmm_lock, flags);
				pr_info("%s: L2 race condition-CP wakeup pending\n", __func__);
			}
		}
		pr_info("L3\n");
		baseband_xmm_powerstate = status;
		spin_lock_irqsave(&xmm_lock, flags);
		system_suspending = false;
		spin_unlock_irqrestore(&xmm_lock, flags);
		if (wake_lock_active(&wakelock)) {
			pr_info("L3 --- wake_unlock[%s]\n", wakelock.name);
			wake_unlock(&wakelock);
		}
		if (wakeup_pending == false) {
			gpio_set_value(data->modem.xmm.ipc_hsic_active, 0);
			waiting_falling_flag = 0;
			pr_info("gpio host active low->\n");
		}
		break;
#endif
	case BBXMM_PS_L2TOL0:
		pr_info("L2->L0\n");
		spin_lock_irqsave(&xmm_lock, flags);
		system_suspending = false;
		wakeup_pending = false;
		spin_unlock_irqrestore(&xmm_lock, flags);
		/* do this only from L2 state */
		if (baseband_xmm_powerstate == BBXMM_PS_L2) {
			baseband_xmm_powerstate = status;
			//pr_info("BB XMM POWER STATE = %d\n", status);
			baseband_xmm_power_L2_resume();
		} else
			goto exit_without_state_change;
		/* note: intentionally falls through into the empty default */
	default:
		break;
	}
	baseband_xmm_powerstate = status;
	pr_info("BB XMM POWER STATE = %d\n", status);
	return;

exit_without_state_change:
	pr_info("BB XMM POWER STATE = %d (not change to %d)\n",
		baseband_xmm_powerstate, status);
	return;
}
EXPORT_SYMBOL_GPL(baseband_xmm_set_power_status);
/*
 * IRQ handler for the modem's IPC_AP_WAKE line (both edges).
 *
 * Drives two state machines:
 *  - boot handshake: IRQ_READY -> INIT1 (first falling edge after a
 *    rising one; queues init1_work) -> INIT2 (rising edge; queues
 *    init2_work);
 *  - runtime power: after INIT2, a falling edge is either the CP's ack
 *    of our bb_wake request or a CP-initiated L2->L0 (or L3->L0) wake,
 *    a rising edge completes the wake and drops us back into L0.
 */
irqreturn_t baseband_xmm_power_ipc_ap_wake_irq(int irq, void *dev_id)
{
	int value;
	struct baseband_power_platform_data *data = baseband_power_driver_data;
	/* pr_info("%s\n", __func__); */
	value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
	if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
		pr_err("%s - spurious irq\n", __func__);
	} else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
		if (!value) {
			pr_info("%s - IPC_AP_WAKE_INIT1"
				" - got falling edge\n",
				__func__);
			/* the handshake requires a rising edge first */
			if (waiting_falling_flag == 0) {
				pr_info("%s return because irq must get the rising event at first\n", __func__);
				return IRQ_HANDLED;
			}
			/* go to IPC_AP_WAKE_INIT1 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
			/* queue work */
			queue_work(workqueue, &init1_work);
		} else {
			pr_info("%s - IPC_AP_WAKE_INIT1"
				" - wait for falling edge\n",
				__func__);
			waiting_falling_flag = 1;
		}
	} else if (ipc_ap_wake_state == IPC_AP_WAKE_INIT1) {
		if (!value) {
			pr_info("%s - IPC_AP_WAKE_INIT2"
				" - wait for rising edge\n",
				__func__);
		} else {
			pr_info("%s - IPC_AP_WAKE_INIT2"
				" - got rising edge\n",
				__func__);
			/* go to IPC_AP_WAKE_INIT2 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
			/* queue work */
			queue_work(workqueue, &init2_work);
		}
	} else {
		/* normal runtime operation (post-handshake) */
		if (!value) {
			pr_info("%s - falling\n", __func__);
			/* First check it a CP ack or CP wake */
			if (data->pin_state == 0) {
				/* AP L2 to L0 wakeup */
				pr_info("VP: received rising wakeup ap l2->l0\n");
				data->pin_state = 1;
				wake_up_interruptible(&data->bb_wait);
			}
			value = gpio_get_value
				(data->modem.xmm.ipc_bb_wake);
			if (value) {
				/* bb_wake still high: this edge is the
				 * CP's ack of our own wake request */
				pr_info("cp ack for bb_wake\n");
				ipc_ap_wake_state = IPC_AP_WAKE_L;
				return IRQ_HANDLED;
			}
			spin_lock(&xmm_lock);
			wakeup_pending = true;
			if (system_suspending) {
				spin_unlock(&xmm_lock);
				pr_info("system_suspending=1, Just set wakup_pending flag=true\n");
			} else {
#ifndef CONFIG_REMOVE_HSIC_L3_STATE
				if (baseband_xmm_powerstate ==
						BBXMM_PS_L3) {
					spin_unlock(&xmm_lock);
					pr_info(" CP L3 -> L0\n");
					pr_info("set wakeup_pending=true, wait for no-irq-resuem if you are not under LP0 yet !.\n");
					pr_info("set wakeup_pending=true, wait for system resume if you already under LP0.\n");
				} else
#endif
				if (baseband_xmm_powerstate ==
						BBXMM_PS_L2) {
					CP_initiated_L2toL0 = true;
					spin_unlock(&xmm_lock);
					baseband_xmm_set_power_status
						(BBXMM_PS_L2TOL0);
				} else {
					CP_initiated_L2toL0 = true;
					spin_unlock(&xmm_lock);
					pr_info(" CP wakeup pending- new race condition");
				}
			}
			/* save gpio state */
			ipc_ap_wake_state = IPC_AP_WAKE_L;
		} else {
			pr_info("%s - rising\n", __func__);
			value = gpio_get_value
				(data->modem.xmm.ipc_hsic_active);
			if (!value) {
				pr_info("host active low: ignore request\n");
				ipc_ap_wake_state = IPC_AP_WAKE_H;
				return IRQ_HANDLED;
			}
			value = gpio_get_value
				(data->modem.xmm.ipc_bb_wake);
			if (value) {
				/* Clear the slave wakeup request */
				gpio_set_value
					(data->modem.xmm.ipc_bb_wake, 0);
				pr_info("set gpio slave wakeup low done ->\n");
			}
			if (reenable_autosuspend && usbdev) {
				struct usb_interface *intf;
				reenable_autosuspend = false;
				intf = usb_ifnum_to_if(usbdev, 0);
				if( NULL != intf ){
					/* get+put kicks the autopm state
					 * machine so autosuspend resumes */
					if (usb_autopm_get_interface_async(intf) >= 0) {
						pr_info("get_interface_async succeeded"
							" - call put_interface\n");
						usb_autopm_put_interface_async(intf);
					} else {
						pr_info("get_interface_async failed"
							" - do not call put_interface\n");
					}
				}
			}
			/* NOTE(review): `&usbdev->dev` is non-NULL even when
			 * usbdev is NULL - probably meant `usbdev`; confirm. */
			if (short_autosuspend&& (&usbdev->dev)) {
				pr_info("set autosuspend delay %d ms\n", short_autosuspend_delay);
				queue_work(workqueue_susp, &work_shortsusp);
			}
			modem_sleep_flag = false;
			baseband_xmm_set_power_status(BBXMM_PS_L0);
			/* save gpio state */
			ipc_ap_wake_state = IPC_AP_WAKE_H;
		}
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(baseband_xmm_power_ipc_ap_wake_irq);
/*
 * Work item: toggle IPC_HSIC_ACTIVE (host_active) for ~100 ms and then
 * restore it, to get the modem's attention when SUSPEND_REQ alone
 * cannot wake it.
 */
static void baseband_xmm_power_reset_host_active_work(struct work_struct *work)
{
	int orig = gpio_get_value(TEGRA_GPIO_PN1);

	pr_info("Oringial IPC_HSIC_ACTIVE =%d", orig);
	gpio_set_value(TEGRA_GPIO_PN1, !orig);
	msleep(100);
	gpio_set_value(TEGRA_GPIO_PN1, orig);
}
/*
 * INIT1 work item: acknowledge modem INIT1 handshake by toggling
 * IPC_HSIC_ACTIVE in a fixed-timing sequence (high -> low 10 ms -> high).
 * Bails out if the line is not already high, as the handshake expects.
 */
static void baseband_xmm_power_init1_work(struct work_struct *work)
{
	int value;
	pr_info("%s {\n", __func__);
	/* check if IPC_HSIC_ACTIVE high */
	value = gpio_get_value(baseband_power_driver_data->
		modem.xmm.ipc_hsic_active);
	if (value != 1) {
		pr_err("%s - expected IPC_HSIC_ACTIVE high!\n", __func__);
		return;
	}
	/* wait 100 ms */
	msleep(100);
	/* set IPC_HSIC_ACTIVE low */
	gpio_set_value(baseband_power_driver_data->
		modem.xmm.ipc_hsic_active, 0);
	/* wait 10 ms */
	msleep(10);
	/* set IPC_HSIC_ACTIVE high */
	gpio_set_value(baseband_power_driver_data->
		modem.xmm.ipc_hsic_active, 1);
	/* wait 20 ms */
	msleep(20);
#ifdef BB_XMM_OEM1
	/* OEM1 variant ends the sequence with the line low instead
	 * (comment below suggests this was a merge left to verify) */
	gpio_set_value(baseband_power_driver_data->
		modem.xmm.ipc_hsic_active, 0);
	printk(KERN_INFO"%s merge need check set IPC_HSIC_ACTIVE low\n", __func__);
#endif /* BB_XMM_OEM1 */
	pr_info("%s }\n", __func__);
}
/*
 * INIT2 work item: register the HSIC host controller, exactly once.
 * Subsequent invocations are no-ops because register_hsic_device is
 * cleared after the first attempt.
 */
static void baseband_xmm_power_init2_work(struct work_struct *work)
{
	struct baseband_power_platform_data *pdata = baseband_power_driver_data;

	pr_info("%s\n", __func__);

	/* nothing to do without platform data */
	if (!pdata)
		return;

	/* register usb host controller only once */
	if (!register_hsic_device)
		return;

	if (pdata->hsic_register)
		pdata->modem.xmm.hsic_device = pdata->hsic_register();
	else
		pr_err("%s: hsic_register is missing\n", __func__);
	register_hsic_device = false;
}
/* Do the work for AP/CP initiated L2->L0 */
static void baseband_xmm_power_L2_resume(void)
{
	struct baseband_power_platform_data *data = baseband_power_driver_data;
	int value;
	unsigned long flags;
	pr_info("%s\n", __func__);
	if (!baseband_power_driver_data)
		return;
	/* claim the wakelock here to avoid any system suspend;
	 * without the L3 state a 2 s timeout lock is enough, otherwise
	 * hold the lock until explicitly released elsewhere */
	if (!wake_lock_active(&wakelock))
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
		wake_lock_timeout(&wakelock, HZ*2);
#else
		wake_lock(&wakelock);
#endif
	modem_sleep_flag = false;
	/* clear any pending CP wakeup under the lock */
	spin_lock_irqsave(&xmm_lock, flags);
	wakeup_pending = false;
	spin_unlock_irqrestore(&xmm_lock, flags);
	if (CP_initiated_L2toL0) {
		/* CP initiated: just kick the resume work item */
		pr_info("CP L2->L0\n");
		CP_initiated_L2toL0 = false;
		queue_work(workqueue, &L2_resume_work);
#if 0
		if (usbdev) {
			struct usb_interface *intf;
			intf = usb_ifnum_to_if(usbdev, 0);
			pr_info("%s: cnt %d intf=%p &intf->dev=%p kobje=%s\n",
				__func__, atomic_read(&intf->dev.power.usage_count),intf,&intf->dev,kobject_name(&intf->dev.kobj));
		}
#endif
	} else {
		/* AP initiated: set the slave wakeup request and wait for
		 * the CP to acknowledge by pulling ipc_ap_wake low */
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
		pr_info("AP/CP L2->L0\n");
#else
		pr_info("AP L2->L0\n");
#endif
		value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
		if (value) {
			/* cptrycount: full wake-pulse retries (max 5);
			 * eresyscount: signal-interrupted wait retries (max 100) */
			int ret=0, cptrycount=0, eresyscount=0;
			const int delay=200, MAXTRY=5, eredelay=3, MAX_ERETRY=100;
			unsigned long target_jiffies=0;
			data->pin_state = 0;
retry_cpwake:
			/* on a retry, drop bb_wake first to generate a
			 * fresh rising edge for the CP */
			if(cptrycount)
			{
				gpio_set_value(data->modem.xmm.ipc_bb_wake, 0);
				mdelay(1);
				debug_gpio_dump();
			}
			target_jiffies = jiffies + msecs_to_jiffies(delay);
			/* wake bb */
			gpio_set_value(data->modem.xmm.ipc_bb_wake, 1);
retry:
			/* wait for cp */
			pr_info("waiting for host wakeup from CP... <%d,%d>\n", cptrycount, eresyscount);
			ret = wait_event_interruptible_timeout(
				data->bb_wait,
				data->pin_state == 1 || (gpio_get_value(data->modem.xmm.ipc_ap_wake) == 0),
				MIN( (target_jiffies-jiffies), msecs_to_jiffies(delay) ) );
			if (ret == 0) {
				/* timed out: pulse bb_wake again, up to MAXTRY,
				 * then declare a radio fatal and take a coredump */
				pr_info("%s: wait for cp ack %d times\n", __func__, cptrycount);
				debug_gpio_dump();
				cptrycount++;
				if(cptrycount == MAXTRY)
				{
					pr_err("!!AP L2->L0 Failed\n");
					trigger_radio_fatal_get_coredump();
					return;
				}
				goto retry_cpwake;
			}
			if (ret == -ERESTARTSYS ) {
				/* interrupted by a signal: back off briefly and
				 * resume waiting without re-pulsing bb_wake */
				eresyscount++;
				pr_info("%s: caught signal, sleep and retry %d times\n", __func__, eresyscount);
				if(eresyscount == MAX_ERETRY)
				{
					pr_err("too many ERESTARTSYS <%d>, abort\n", eresyscount);
					debug_gpio_dump();
					trigger_radio_fatal_get_coredump();
					return;
				}
				msleep(eredelay);
				goto retry;
			}
			pr_info("Get gpio host wakeup low <-\n");
		} else {
			/* ipc_ap_wake already low: CP is awake */
			pr_info("CP already ready\n");
		}
	}
}
/*
 * Work item: switch the modem USB device to the short autosuspend delay.
 *
 * Fixes: the old guard `!usbdev || !&usbdev->dev` was partly meaningless -
 * the address of a struct member can never be NULL, so only the usbdev
 * pointer itself needs checking. Also adds the printk trailing newline.
 */
static void baseband_xmm_power_shortsusp(struct work_struct *work)
{
	if (!usbdev) {
		pr_err("%s usbdev is invalid\n", __func__);
		return;
	}
	pm_runtime_set_autosuspend_delay(&usbdev->dev, short_autosuspend_delay);
	pr_info("%s set_autosuspend_delay <%d>\n", __func__, short_autosuspend_delay);
}
/*
 * Work item: restore the modem USB device's default autosuspend delay.
 *
 * Fixes: same always-false `!&usbdev->dev` sub-expression as in
 * baseband_xmm_power_shortsusp() - checking usbdev alone is sufficient.
 */
static void baseband_xmm_power_defaultsusp(struct work_struct *work)
{
	if (!usbdev) {
		pr_err("%s usbdev is invalid\n", __func__);
		return;
	}
	pm_runtime_set_autosuspend_delay(&usbdev->dev, autosuspend_delay);
	//pr_info("%s set_autosuspend_delay <%d>", __func__, autosuspend_delay);
}
/* Do the work for CP initiated L2->L0 */
static void baseband_xmm_power_L2_resume_work(struct work_struct *work)
{
	struct usb_interface *iface;

	pr_info("%s {\n", __func__);

	if (!usbdev) {
		pr_info("%s - !usbdev\n", __func__);
		return;
	}

	/* briefly bump the autopm refcount on interface 0 so the USB core
	 * resumes the device, then drop it again */
	usb_lock_device(usbdev);
	iface = usb_ifnum_to_if(usbdev, 0);
	if (iface != NULL && usb_autopm_get_interface(iface) == 0)
		usb_autopm_put_interface(iface);
	usb_unlock_device(usbdev);

	pr_info("} %s\n", __func__);
}
/*
 * Drive the modem reset / power-on sequence: release reset (bb_rst high)
 * and then pulse bb_on for ~40 us. The delays are part of the modem's
 * power-up timing and must not be reordered.
 */
static void baseband_xmm_power_reset_on(void)
{
	/* reset / power on sequence */
	msleep(40);
	gpio_set_value(baseband_power_driver_data->modem.xmm.bb_rst, 1);
	msleep(1);
	gpio_set_value(baseband_power_driver_data->modem.xmm.bb_on, 1);
	udelay(40);
	gpio_set_value(baseband_power_driver_data->modem.xmm.bb_on, 0);
}
/* Work item driving the init/flash state machine below; allocated in probe. */
static struct baseband_xmm_power_work_t *baseband_xmm_power_work;
/*
 * State machine for modem bring-up. Each invocation handles the current
 * bbxmm_work->state and, where applicable, advances the state and
 * re-queues itself on the driver workqueue.
 *
 * Fixes: corrected the "work->sate" typo in the entry log message.
 * NOTE(review): the cast from work_struct* assumes the work_struct is the
 * first member of baseband_xmm_power_work_t - confirm against the struct
 * definition.
 */
static void baseband_xmm_power_work_func(struct work_struct *work)
{
	struct baseband_xmm_power_work_t *bbxmm_work
		= (struct baseband_xmm_power_work_t *) work;
	pr_info("%s - work->state=%d\n", __func__, bbxmm_work->state);
	switch (bbxmm_work->state) {
	case BBXMM_WORK_UNINIT:
		pr_info("BBXMM_WORK_UNINIT\n");
		break;
	case BBXMM_WORK_INIT:
		pr_info("BBXMM_WORK_INIT\n");
		/* go to next state based on flash/pm configuration */
		bbxmm_work->state = (modem_flash && !modem_pm)
			? BBXMM_WORK_INIT_FLASH_STEP1
			: (modem_flash && modem_pm)
			? BBXMM_WORK_INIT_FLASH_PM_STEP1
			: (!modem_flash && modem_pm)
			? BBXMM_WORK_INIT_FLASHLESS_PM_STEP1
			: BBXMM_WORK_UNINIT;
		pr_info("Go to next state %d\n", bbxmm_work->state);
		queue_work(workqueue, work);
		break;
	case BBXMM_WORK_INIT_FLASH_STEP1:
		/* register usb host controller */
		pr_info("%s: register usb host controller\n", __func__);
		if (baseband_power_driver_data->hsic_register)
			baseband_power_driver_data->modem.xmm.hsic_device =
				baseband_power_driver_data->hsic_register();
		else
			pr_err("%s: hsic_register is missing\n", __func__);
		break;
	case BBXMM_WORK_INIT_FLASH_PM_STEP1:
		/* [modem ver >= 1130] start with IPC_HSIC_ACTIVE low */
		if (modem_ver >= XMM_MODEM_VER_1130) {
			pr_info("%s: ver > 1130:"
				" ipc_hsic_active -> 0\n", __func__);
			gpio_set_value(baseband_power_driver_data->
				modem.xmm.ipc_hsic_active, 0);
		}
		/* reset / power on sequence */
		baseband_xmm_power_reset_on();
		/* set power status as on */
		power_onoff = 1;
		/* optional delay
		 * 0 = flashless
		 *   ==> causes next step to enumerate modem boot rom
		 *       (058b / 0041)
		 * some delay > boot rom timeout
		 *   ==> causes next step to enumerate modem software
		 *       (1519 / 0020)
		 *       (requires modem to be flash version, not flashless
		 *       version)
		 */
		if (enum_delay_ms)
			msleep(enum_delay_ms);
		/* register usb host controller */
		pr_info("%s: register usb host controller\n", __func__);
		if (baseband_power_driver_data->hsic_register)
			baseband_power_driver_data->modem.xmm.hsic_device =
				baseband_power_driver_data->hsic_register();
		else
			pr_err("%s: hsic_register is missing\n", __func__);
		/* go to next state */
		bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
			? BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1
			: BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1;
		queue_work(workqueue, work);
		pr_info("Go to next state %d\n", bbxmm_work->state);
		break;
	case BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1:
		pr_info("BBXMM_WORK_INIT_FLASH_PM_VER_LT_1130_STEP1\n");
		break;
	case BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1:
		pr_info("BBXMM_WORK_INIT_FLASH_PM_VER_GE_1130_STEP1\n");
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_STEP1:
		/* go to next state */
		bbxmm_work->state = (modem_ver < XMM_MODEM_VER_1130)
			? BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_WAIT_IRQ
			: BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1;
		queue_work(workqueue, work);
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1:
		pr_info("BBXMM_WORK_INIT_FLASHLESS_PM_VER_LT_1130_STEP1\n");
		break;
	case BBXMM_WORK_INIT_FLASHLESS_PM_VER_GE_1130_STEP1:
		break;
	default:
		break;
	}
}
/*
 * Called from the USB notifier on USB_DEVICE_ADD. If interface 0 matches
 * the modem's id table (xmm_pm_ids), remember the device in the global
 * `usbdev` and enable USB autosuspend with the configured delay.
 */
static void baseband_xmm_device_add_handler(struct usb_device *udev)
{
	struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
	const struct usb_device_id *id;
	pr_info("%s \n",__func__);
	if (intf == NULL)
		return;
	id = usb_match_id(intf, xmm_pm_ids);
	if (id) {
		pr_info("persist_enabled: %u\n", udev->persist_enabled);
		pr_info("Add device %d <%s %s>\n", udev->devnum,
			udev->manufacturer, udev->product);
		usbdev = udev;
		pm_runtime_set_autosuspend_delay(&udev->dev, autosuspend_delay);//for ICS 39kernel
		usb_enable_autosuspend(udev);
		// pr_info("enable autosuspend, timer <%d>", autosuspend_delay);
	}
}
static void baseband_xmm_device_remove_handler(struct usb_device *udev)
{
if (usbdev == udev) {
pr_info("Remove device %d <%s %s>\n", udev->devnum,
udev->manufacturer, udev->product);
usbdev = 0;
}
}
/*
 * USB core notifier callback. `blob` is the struct usb_device for both
 * the add and remove events; other notification types are ignored.
 *
 * Fixes: added an explicit `default` case per kernel switch convention.
 */
static int usb_xmm_notify(struct notifier_block *self, unsigned long action,
			  void *blob)
{
	switch (action) {
	case USB_DEVICE_ADD:
		baseband_xmm_device_add_handler(blob);
		break;
	case USB_DEVICE_REMOVE:
		baseband_xmm_device_remove_handler(blob);
		break;
	default:
		/* bus-level and other events are not of interest */
		break;
	}
	return NOTIFY_OK;
}
/* USB device add/remove notifier, registered in probe. */
static struct notifier_block usb_xmm_nb = {
	.notifier_call = usb_xmm_notify,
};
/*
 * PM notifier: veto system suspend while the modem is busy, and on resume
 * service any CP wakeup that arrived while we were suspending.
 * wakeup_pending / system_suspending are protected by xmm_lock.
 */
static int baseband_xmm_power_pm_notifier_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct baseband_power_platform_data *data = baseband_power_driver_data;
	unsigned long flags;
	if (!data)
		return NOTIFY_DONE;
	pr_info("%s: event %ld\n", __func__, event);
	switch (event) {
	case PM_SUSPEND_PREPARE:
		pr_info("%s : PM_SUSPEND_PREPARE\n", __func__);
		/* an active wakelock means the modem link is in use */
		if (wake_lock_active(&wakelock)) {
			pr_info("%s: wakelock was active, aborting suspend\n",__func__);
			return NOTIFY_STOP;
		}
		spin_lock_irqsave(&xmm_lock, flags);
		if (wakeup_pending) {
			/* CP requested wakeup - refuse to suspend */
			wakeup_pending = false;
			spin_unlock_irqrestore(&xmm_lock, flags);
			pr_info("%s : XMM busy : Abort system suspend\n",
				__func__);
			return NOTIFY_STOP;
		}
		system_suspending = true;
		spin_unlock_irqrestore(&xmm_lock, flags);
		return NOTIFY_OK;
	case PM_POST_SUSPEND:
		pr_info("%s : PM_POST_SUSPEND\n", __func__);
		spin_lock_irqsave(&xmm_lock, flags);
		system_suspending = false;
		if (wakeup_pending &&
			(baseband_xmm_powerstate == BBXMM_PS_L2)) {
			/* CP wakeup arrived during suspend - service it now */
			wakeup_pending = false;
			spin_unlock_irqrestore(&xmm_lock, flags);
			pr_info("%s : Service Pending CP wakeup\n",
				__func__);
			CP_initiated_L2toL0 = true;
			baseband_xmm_set_power_status
				(BBXMM_PS_L2TOL0);
			return NOTIFY_OK;
		}
		wakeup_pending = false;
		spin_unlock_irqrestore(&xmm_lock, flags);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}
/* System suspend/resume notifier, registered in probe. */
static struct notifier_block baseband_xmm_power_pm_notifier = {
	.notifier_call = baseband_xmm_power_pm_notifier_event,
};
/*
 * Platform probe: validates platform data, creates sysfs attributes,
 * requests GPIOs and the AP-wake IRQ, creates the workqueues, starts the
 * bring-up state machine and registers the USB and PM notifiers.
 *
 * NOTE(review): several error paths below return without releasing
 * resources acquired earlier (device files, wakelock, gpios, earlier
 * workqueues) - confirm whether these leaks matter for this platform.
 * NOTE(review): the four gpio_request() calls for the UART pins overwrite
 * `err` without checking it.
 */
static int baseband_xmm_power_driver_probe(struct platform_device *device)
{
	struct baseband_power_platform_data *data
		= (struct baseband_power_platform_data *)
		device->dev.platform_data;
	struct device *dev = &device->dev;
	unsigned long flags;
	int err, ret=0;
	pr_info(MODULE_NAME"%s 0705 - xmm_wake_pin_miss. \n", __func__);
	// pr_info(MODULE_NAME"enum_delay_ms=%d\n", enum_delay_ms);
	htcpcbid=htc_get_pcbid_info();
	pr_info(MODULE_NAME"htcpcbid=%d\n", htcpcbid);
	/* check for platform data */
	if (!data)
		return -ENODEV;
	/* check if supported modem */
	if (data->baseband_type != BASEBAND_XMM) {
		pr_err("unsuppported modem\n");
		return -ENODEV;
	}
	/* save platform data */
	baseband_power_driver_data = data;
	/* init wait queue used by the L2->L0 handshake */
	data->pin_state = 1;
	init_waitqueue_head(&data->bb_wait);
	/* create device file */
	err = device_create_file(dev, &dev_attr_xmm_onoff);
	if (err < 0) {
		pr_err("%s - device_create_file failed\n", __func__);
		return -ENODEV;
	}
	err = device_create_file(dev, &dev_attr_debug_handler);
	if (err < 0) {
		pr_err("%s - device_create_file failed\n", __func__);
		return -ENODEV;
	}
	/* HTC: create device file for host debugging */
	if (device_create_file(dev,&dev_attr_host_dbg))
		pr_info(MODULE_NAME"Warning: host attribute can't be created\n");
	/* init wake lock */
	wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "baseband_xmm_power");
	/* init spin lock */
	spin_lock_init(&xmm_lock);
	/* request baseband gpio(s): fill the table from platform data */
	tegra_baseband_gpios[0].gpio = baseband_power_driver_data
		->modem.xmm.bb_rst;
	tegra_baseband_gpios[1].gpio = baseband_power_driver_data
		->modem.xmm.bb_on;
	tegra_baseband_gpios[2].gpio = baseband_power_driver_data
		->modem.xmm.ipc_bb_wake;
	tegra_baseband_gpios[3].gpio = baseband_power_driver_data
		->modem.xmm.ipc_ap_wake;
	tegra_baseband_gpios[4].gpio = baseband_power_driver_data
		->modem.xmm.ipc_hsic_active;
	tegra_baseband_gpios[5].gpio = baseband_power_driver_data
		->modem.xmm.ipc_hsic_sus_req;
	tegra_baseband_gpios[6].gpio = baseband_power_driver_data
		->modem.xmm.bb_vdd_en;
	tegra_baseband_gpios[7].gpio = baseband_power_driver_data
		->modem.xmm.bb_rst_pwrdn;
	tegra_baseband_gpios[8].gpio = baseband_power_driver_data
		->modem.xmm.bb_rst2;
	/*HTC request these gpio on probe only, config them when running power_on/off function*/
	err = gpio_request_only_array(tegra_baseband_gpios,
		ARRAY_SIZE(tegra_baseband_gpios));
	if (err < 0) {
		pr_err("%s - request gpio(s) failed\n", __func__);
		return -ENODEV;
	}
#if 1/*HTC*/
	/* assign the same gpios to the power-off table used elsewhere */
	tegra_baseband_gpios_power_off_modem[0].gpio = baseband_power_driver_data
		->modem.xmm.bb_rst;
	tegra_baseband_gpios_power_off_modem[1].gpio = baseband_power_driver_data
		->modem.xmm.bb_on;
	tegra_baseband_gpios_power_off_modem[2].gpio = baseband_power_driver_data
		->modem.xmm.ipc_bb_wake;
	tegra_baseband_gpios_power_off_modem[3].gpio = baseband_power_driver_data
		->modem.xmm.ipc_ap_wake;
	tegra_baseband_gpios_power_off_modem[4].gpio = baseband_power_driver_data
		->modem.xmm.ipc_hsic_active;
	tegra_baseband_gpios_power_off_modem[5].gpio = baseband_power_driver_data
		->modem.xmm.ipc_hsic_sus_req;
	tegra_baseband_gpios_power_off_modem[6].gpio = baseband_power_driver_data
		->modem.xmm.bb_vdd_en;
	tegra_baseband_gpios_power_off_modem[7].gpio = baseband_power_driver_data
		->modem.xmm.bb_rst_pwrdn;
	tegra_baseband_gpios_power_off_modem[8].gpio = baseband_power_driver_data
		->modem.xmm.bb_rst2;
	/* request UART pins (return values intentionally best-effort here) */
	pr_info("%s request UART\n", __func__);
	err =gpio_request(TEGRA_GPIO_PJ7, "IMC_UART_TX");
	err =gpio_request(TEGRA_GPIO_PK7, "IMC_UART_RTS");
	err =gpio_request(TEGRA_GPIO_PB0 ,"IMC_UART_RX");
	err =gpio_request(TEGRA_GPIO_PB1, "IMC_UART_CTS");
	pr_info("%s pull UART o d\n", __func__);
	/* for power consumption: the rest of the config is applied by the
	 * power_on function; here only the power-off configuration is set */
	pr_info("%s config_gpio_for_power_off\n", __func__);
	err=config_gpio_for_power_off();
	if (err < 0) {
		pr_err("%s - config_gpio_for_power_off gpio(s)\n", __func__);
		return -ENODEV;
	}
#endif/*HTC*/
	/* request baseband irq(s) */
	if (modem_flash && modem_pm) {
		pr_info("%s: request_irq IPC_AP_WAKE_IRQ\n", __func__);
		ipc_ap_wake_state = IPC_AP_WAKE_UNINIT;
		err = request_threaded_irq(
			gpio_to_irq(data->modem.xmm.ipc_ap_wake),
			baseband_xmm_power_ipc_ap_wake_irq,
			NULL,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			"IPC_AP_WAKE_IRQ",
			NULL);
		if (err < 0) {
			pr_err("%s - request irq IPC_AP_WAKE_IRQ failed\n",
				__func__);
			return err;
		}
		ipc_ap_wake_state = IPC_AP_WAKE_IRQ_READY;
		if (modem_ver >= XMM_MODEM_VER_1130) {
			pr_info("%s: ver > 1130: AP_WAKE_INIT1\n", __func__);
			/* ver 1130 or later starts in INIT1 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
		}
	}
	/* init work queues */
	workqueue = create_singlethread_workqueue("baseband_xmm_power_workqueue");
	if (!workqueue) {
		pr_err("cannot create workqueue\n");
		return -1;
	}
	workqueue_susp = alloc_workqueue("baseband_xmm_power_autosusp", WQ_UNBOUND | WQ_HIGHPRI | WQ_NON_REENTRANT, 1);
	if (!workqueue_susp) {
		pr_err("cannot create workqueue_susp\n");
		return -1;
	}
	workqueue_debug = create_singlethread_workqueue("baseband_xmm_power_debug");
	if (!workqueue_debug) {
		pr_err("cannot create workqueue_debug\n");
		return -1;
	}
	/* allocate and kick off the bring-up state machine work item */
	baseband_xmm_power_work = (struct baseband_xmm_power_work_t *)
		kmalloc(sizeof(struct baseband_xmm_power_work_t), GFP_KERNEL);
	if (!baseband_xmm_power_work) {
		pr_err("cannot allocate baseband_xmm_power_work\n");
		return -1;
	}
	INIT_WORK((struct work_struct *) baseband_xmm_power_work,
		baseband_xmm_power_work_func);
	baseband_xmm_power_work->state = BBXMM_WORK_INIT;
	queue_work(workqueue,
		(struct work_struct *) baseband_xmm_power_work);
	/* init work objects */
	INIT_WORK(&init1_work, baseband_xmm_power_init1_work);
	INIT_WORK(&init2_work, baseband_xmm_power_init2_work);
	INIT_WORK(&L2_resume_work, baseband_xmm_power_L2_resume_work);
	INIT_WORK(&work_shortsusp, baseband_xmm_power_shortsusp);
	INIT_WORK(&work_defaultsusp, baseband_xmm_power_defaultsusp);
	INIT_WORK(&work_reset_host_active, baseband_xmm_power_reset_host_active_work);
	/* init state variables */
	register_hsic_device = true;
	CP_initiated_L2toL0 = false;
	baseband_xmm_powerstate = BBXMM_PS_UNINIT;
	spin_lock_irqsave(&xmm_lock, flags);
	wakeup_pending = false;
	system_suspending = false;
	spin_unlock_irqrestore(&xmm_lock, flags);
	usb_register_notify(&usb_xmm_nb);
	register_pm_notifier(&baseband_xmm_power_pm_notifier);
	/*HTC*/
	/*set Radio fatal Pin PN2 to OutPut Low*/
	ret=gpio_direction_output(TEGRA_GPIO_PN2,0);
	if (ret < 0)
		pr_err("%s: set Radio fatal Pin to Output error\n", __func__);
	/*set BB2AP_SUSPEND_REQ Pin (TEGRA_GPIO_PV0) to OutPut Low*/
	ret=gpio_direction_output(TEGRA_GPIO_PV0,0);
	if (ret < 0)
		pr_err("%s: set BB2AP_SUSPEND_REQ Pin to Output error\n", __func__);
	/* request SIM det as wakeup source whatever flight mode is on/off */
	pr_info("%s: request enable irq wake SIM det to wakeup source\n", __func__);
	ret = enable_irq_wake(gpio_to_irq(TEGRA_GPIO_PI5));
	if (ret < 0)
		pr_err("%s: enable_irq_wake error\n", __func__);
	/* sysfs kset/kobject used to emit silent-reset uevents */
	pr_info("%s: init kobj for silent reset", __func__);
	silent_reset_kset = kset_create_and_add("SilentResetKset", NULL, NULL);
	if(!silent_reset_kset)
	{
		pr_err("%s: silent_reset_kset create failure.", __func__);
	}
	else
	{
		silent_reset_kobj = kobject_create_and_add("SilentResetTrigger", kobject_get(&dev->kobj));
		if(!silent_reset_kobj)
		{
			pr_err("%s: silent_reset_kobj create failure.", __func__);
			kset_unregister(silent_reset_kset);
			silent_reset_kset = NULL;
		}
		else
			silent_reset_kobj->kset = silent_reset_kset;
	}
	pr_info("%s }\n", __func__);
	return 0;
}
static int baseband_xmm_power_driver_remove(struct platform_device *device)
{
struct baseband_power_platform_data *data
= (struct baseband_power_platform_data *)
device->dev.platform_data;
struct device *dev = &device->dev;
pr_info("%s\n", __func__);
/* check for platform data */
if (!data)
return 0;
unregister_pm_notifier(&baseband_xmm_power_pm_notifier);
usb_unregister_notify(&usb_xmm_nb);
/* free work structure */
kfree(baseband_xmm_power_work);
baseband_xmm_power_work = (struct baseband_xmm_power_work_t *) 0;
/* free baseband irq(s) */
if (modem_flash && modem_pm) {
free_irq(gpio_to_irq(baseband_power_driver_data
->modem.xmm.ipc_ap_wake), NULL);
}
/* free baseband gpio(s) */
gpio_free_array(tegra_baseband_gpios,
ARRAY_SIZE(tegra_baseband_gpios));
/* destroy wake lock */
wake_lock_destroy(&wakelock);
/* delete device file */
device_remove_file(dev, &dev_attr_xmm_onoff);
device_remove_file(dev, &dev_attr_debug_handler);
/* HTC: delete device file */
device_remove_file(dev, &dev_attr_host_dbg);
/* destroy wake lock */
destroy_workqueue(workqueue_susp);
destroy_workqueue(workqueue);
if(silent_reset_kset)
{
kset_unregister(silent_reset_kset);
silent_reset_kset = NULL;
}
if(silent_reset_kobj)
{
kobject_put(silent_reset_kobj);
kobject_put(&dev->kobj);
}
/* unregister usb host controller */
if (data->hsic_unregister && data->modem.xmm.hsic_device)
{
data->hsic_unregister(data->modem.xmm.hsic_device);
data->modem.xmm.hsic_device = NULL;
}
else
pr_err("%s: hsic_unregister is missing\n", __func__);
return 0;
}
#ifndef CONFIG_REMOVE_HSIC_L3_STATE
/*
 * Handle system resume when the HSIC L3 state is in use: transition the
 * modem L3 -> L0 by raising bb_wake and polling ipc_ap_wake for up to
 * 10 seconds. Returns 0 in all cases.
 */
static int baseband_xmm_power_driver_handle_resume(
			struct baseband_power_platform_data *data)
{
	int value;
	unsigned long flags;
	unsigned long timeout;
	int delay = 10000;	/* maxmum delay in msec */
	/* check for platform data */
	if (!data)
		return 0;
	/* check if modem is on */
	if (power_onoff == 0) {
		pr_info("%s - flight mode - nop\n", __func__);
		return 0;
	}
	modem_sleep_flag = false;
	spin_lock_irqsave(&xmm_lock, flags);
	/* Clear wakeup pending flag */
	wakeup_pending = false;
	spin_unlock_irqrestore(&xmm_lock, flags);
	/* L3->L0 */
	baseband_xmm_set_power_status(BBXMM_PS_L3TOL0);
	value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
	if (value) {
		/* AP-initiated: request wakeup and busy-poll for the ack */
		pr_info("AP L3 -> L0\n");
		pr_info("waiting for host wakeup...\n");
		timeout = jiffies + msecs_to_jiffies(delay);
		/* wake bb */
		gpio_set_value(data->modem.xmm.ipc_bb_wake, 1);
		pr_info("Set bb_wake high ->\n");
		do {
			udelay(100);
			value = gpio_get_value(data->modem.xmm.ipc_ap_wake);
			if (!value)
				break;
		} while (time_before(jiffies, timeout));
		if (!value) {
			pr_info("gpio host wakeup low <-\n");
			pr_info("%s enable short_autosuspend\n", __func__);
			short_autosuspend = true;
		}
		else
			pr_info("!!AP L3->L0 Failed\n");
	} else {
		/* CP already pulled the line low: it initiated the wakeup */
		pr_info("CP L3 -> L0\n");
	}
	reenable_autosuspend = true;
	return 0;
}
#endif
#ifdef CONFIG_PM
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
/*
 * System suspend callback (no-L3 configuration): nothing to do beyond
 * logging when in flight mode; the PMC drives the HSIC bus.
 */
static int baseband_xmm_power_driver_suspend(struct device *dev)
{
	// int delay = 10000; /* maxmum delay in msec */
	// struct platform_device *pdev = to_platform_device(dev);
	//struct baseband_power_platform_data *pdata = pdev->dev.platform_data;
	/* check if modem is on */
	if (power_onoff == 0) {
		pr_info("%s - flight mode - nop\n", __func__);
		return 0;
	}
	/* PMC is driving hsic bus
	 * tegra_baseband_rail_off();
	 */
	return 0;
}
#else
/* System suspend callback (L3 configuration): log-only stub. */
static int baseband_xmm_power_driver_suspend(struct device *dev)
{
	pr_info("%s\n", __func__);
	return 0;
}
#endif /* CONFIG_REMOVE_HSIC_L3_STATE */
static int baseband_xmm_power_driver_resume(struct device *dev)
{
//struct platform_device *pdev = to_platform_device(dev);
//struct baseband_power_platform_data *data
// = (struct baseband_power_platform_data *)
// pdev->dev.platform_data;
pr_info("%s\n", __func__);
#ifdef CONFIG_REMOVE_HSIC_L3_STATE
/* check if modem is on */
if (power_onoff == 0) {
pr_info("%s - flight mode - nop\n", __func__);
return 0;
}
/* PMC is driving hsic bus
* tegra_baseband_rail_on();
*/
reenable_autosuspend = true;
#else
baseband_xmm_power_driver_handle_resume(data);
#endif
return 0;
}
/*
 * Late (noirq) suspend hook: last chance to abort suspend if the CP
 * requested a wakeup after the PM notifier ran. Returns -EBUSY to abort.
 */
static int baseband_xmm_power_suspend_noirq(struct device *dev)
{
	unsigned long flags;
	pr_info("%s\n", __func__);
	spin_lock_irqsave(&xmm_lock, flags);
	system_suspending = false;
	if (wakeup_pending) {
		wakeup_pending = false;
		spin_unlock_irqrestore(&xmm_lock, flags);
		pr_info("%s:**Abort Suspend: reason CP WAKEUP**\n", __func__);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&xmm_lock, flags);
	return 0;
}
/* Early (noirq) resume hook: log-only; real work happens in .resume. */
static int baseband_xmm_power_resume_noirq(struct device *dev)
{
	pr_info("%s\n", __func__);
	return 0;
}
/* Device PM callbacks wired into the platform driver below. */
static const struct dev_pm_ops baseband_xmm_power_dev_pm_ops = {
	.suspend_noirq = baseband_xmm_power_suspend_noirq,
	.resume_noirq = baseband_xmm_power_resume_noirq,
	.suspend = baseband_xmm_power_driver_suspend,
	.resume = baseband_xmm_power_driver_resume,
};
#endif
/* Platform driver registration for the "baseband_xmm_power" device. */
static struct platform_driver baseband_power_driver = {
	.probe = baseband_xmm_power_driver_probe,
	.remove = baseband_xmm_power_driver_remove,
	.driver = {
		.name = "baseband_xmm_power",
#ifdef CONFIG_PM
		.pm = &baseband_xmm_power_dev_pm_ops,
#endif
	},
};
/*
 * Module init: in MFG (factory) mode, push the autosuspend delays out to
 * effectively "never" before registering the platform driver.
 * NOTE(review): 365*86400 looks like one year in seconds, but
 * pm_runtime_set_autosuspend_delay() takes milliseconds - confirm the
 * intended unit.
 */
static int __init baseband_xmm_power_init(void)
{
	/* HTC */
	int mfg_mode = board_mfg_mode();
	host_dbg_flag = 0;
	if( mfg_mode )
	{
		autosuspend_delay = 365*86400;
		short_autosuspend_delay = 365*86400;
	}
	s_sku_id = board_get_sku_tag();
	pr_info("SKU_ID is 0x%x", s_sku_id);
	//printk("%s:VP adding pm qos request removed\n", __func__);
	//pm_qos_add_request(&modem_boost_cpu_freq_req, PM_QOS_CPU_FREQ_MIN, (s32)PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
	return platform_driver_register(&baseband_power_driver);
}
/* Module exit: unregister the platform driver (probe teardown does the rest). */
static void __exit baseband_xmm_power_exit(void)
{
	pr_info("%s\n", __func__);
	platform_driver_unregister(&baseband_power_driver);
	//pm_qos_remove_request(&modem_boost_cpu_freq_req);
}
module_init(baseband_xmm_power_init)
module_exit(baseband_xmm_power_exit)
| bedalus/hxore | arch/arm/mach-tegra/baseband-xmm-power.c | C | gpl-2.0 | 61,756 |
/* SET.C - performing :set - command
*
* NOTE: Edit this file with tabstop=4 !
*
* 1996-02-29 created;
* 1998-03-14 V 1.0.1
* 1999-01-14 V 1.1.0
* 1999-03-17 V 1.1.1
* 1999-07-02 V 1.2.0 beta
* 1999-08-14 V 1.2.0 final
* 2000-07-15 V 1.3.0 final
* 2001-10-10 V 1.3.1
* 2003-07-03 V 1.3.2
*
* Copyright 1996-2003 by Gerhard Buergmann
* gerhard@puon.at
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* See file COPYING for information on distribution conditions.
*/
#include "bvi.h"
#include "set.h"
extern struct BLOCK_ data_block[BLK_COUNT];	/* marked blocks, defined elsewhere */
static int from_file = 0;	/* nonzero while executing commands from an rc file */
static FILE *ffp;		/* handle of the rc file being read */
static char fbuf[256];		/* line buffer for read_rc() */
static char buf[64];		/* scratch buffer for option display */
/* terminal's original RGB color contents, saved for restoration on exit */
struct {
	short r;
	short g;
	short b;
} original_colors[8];
/* terminal's original color pairs, saved for restoration on exit */
struct {
	short f;
	short b;
} original_colorpairs[8];
/* Option table for ":set": full name, short name, default numeric value,
 * default string value, and type/changed flags. Terminated by an entry
 * with an empty fullname. */
struct param params[] = {
	{"autowrite", "aw", FALSE, "", P_BOOL},
	{"columns", "cm", 16, "", P_NUM},
	{"errorbells", "eb", FALSE, "", P_BOOL},
	{"ignorecase", "ic", FALSE, "", P_BOOL},
	{"magic", "ma", TRUE, "", P_BOOL},
	{"memmove", "mm", FALSE, "", P_BOOL},
	{"offset", "of", 0, "", P_NUM},
	{"readonly", "ro", FALSE, "", P_BOOL},
	{"scroll", "scroll", 12, "", P_NUM},
	{"showmode", "mo", TRUE, "", P_BOOL},
	{"term", "term", 0, "", P_TEXT},
	{"terse", "terse", FALSE, "", P_BOOL},
	{"unixstyle", "us", FALSE, "", P_BOOL},
	{"window", "window", 25, "", P_NUM},
	{"wordlength", "wl", 4, "", P_NUM},
	{"wrapscan", "ws", TRUE, "", P_BOOL},
	{"", "", 0, "", 0,}	/* end marker */
};
/* Color table for ":set color ...": full name, short name, RGB components
 * (0..999 curses scale) and the fallback curses color used when the
 * terminal cannot change colors. Terminated by an empty-name entry. */
struct color colors[] = {	/* RGB definitions and default value, if have no support of 256 colors */
	{"background", "bg", 50, 50, 50, COLOR_BLACK},
	{"addresses", "addr", 335, 506, 700, COLOR_BLUE},
	{"hex", "hex", 600, 600, 600, COLOR_MAGENTA},
	{"data", "data", 0, 800, 400, COLOR_GREEN},
	{"error", "err", 999, 350, 0, COLOR_RED},
	{"status", "stat", 255, 255, 255, COLOR_WHITE},
	{"command", "comm", 255, 255, 255, COLOR_WHITE},
	{"window", "win", 0, 800, 900, COLOR_YELLOW},
	{"addrbg", "addrbg", 0, 0, 0, COLOR_CYAN},
	{"", "", 0, 0, 0, 0}	/* end marker */
};
/*
 * doset() - perform a ":set" command.
 * arg is the parameter string after "set" (NULL shows changed options,
 * "all" shows everything). Handles "no" prefixes for booleans, the
 * "color <name> <r> <g> <b>" sub-command, "<opt>?" queries, numeric
 * assignments and boolean toggles. Returns 0 on success, 1 on error.
 *
 * Fixes:
 *  1. A stray `else { emsg(arg); return 1; }` after the color branch made
 *     all of the regular option handling below unreachable for any
 *     argument not starting with "color". Removed.
 *  2. The "unknown color" test was `if (i == 0)`, which rejected the
 *     first table entry ("background") and let unknown names fall
 *     through to the end marker. It now tests the end marker, matching
 *     the idiom used by the params loop below.
 */
int doset(arg)
	char *arg;		/* parameter string */
{
	int i;
	char *s;
	int did_window = FALSE;
	int state = TRUE;	/* new state of boolean parms. */
	char string[80];

	if (arg == NULL) {
		showparms(FALSE);
		return 0;
	}
	if (!strcmp(arg, "all")) {
		showparms(TRUE);
		return 0;
	}
	if (!strncmp(arg, "no", 2)) {
		state = FALSE;
		arg += 2;
	}
	/* extract colors section */
	if (!strncmp(arg, "color", 5)) {
		arg = substr(arg, 6, -1);
		for (i = 0; colors[i].fullname[0] != '\0'; i++) {
			s = colors[i].fullname;
			if (strncmp(arg, s, strlen(s)) == 0)
				break;
			s = colors[i].shortname;
			if (strncmp(arg, s, strlen(s)) == 0)
				break;
		}
		if (colors[i].fullname[0] == '\0') {
			emsg("Wrong color name!");
			return 0;
		} else {
			/* parse the three 3-digit RGB components that follow
			 * the color name */
			colors[i].r = atoi(substr(arg, strlen(s) + 1, 3));
			colors[i].g = atoi(substr(arg, strlen(s) + 5, 3));
			colors[i].b = atoi(substr(arg, strlen(s) + 9, 3));
			set_palette();
			repaint();
		}
		return 0;
	}
	/* regular option: look it up by full or short name */
	for (i = 0; params[i].fullname[0] != '\0'; i++) {
		s = params[i].fullname;
		if (strncmp(arg, s, strlen(s)) == 0)	/* matched full name */
			break;
		s = params[i].shortname;
		if (strncmp(arg, s, strlen(s)) == 0)	/* matched short name */
			break;
	}
	if (params[i].fullname[0] != '\0') {	/* found a match */
		if (arg[strlen(s)] == '?') {
			/* query: display the current value */
			if (params[i].flags & P_BOOL)
				sprintf(buf, " %s%s",
					(params[i].nvalue ? " " : "no"),
					params[i].fullname);
			else if (params[i].flags & P_TEXT)
				sprintf(buf, " %s=%s", params[i].fullname,
					params[i].svalue);
			else
				sprintf(buf, " %s=%ld", params[i].fullname,
					params[i].nvalue);
			msg(buf);
			return 0;
		}
		if (!strcmp(params[i].fullname, "term")) {
			emsg("Can't change type of terminal from within bvi");
			return 1;
		}
		if (params[i].flags & P_NUM) {
			if ((i == P_LI) || (i == P_OF))
				did_window++;
			if (arg[strlen(s)] != '=' || state == FALSE) {
				sprintf(string, "Option %s is not a toggle",
					params[i].fullname);
				emsg(string);
				return 1;
			} else {
				/* leading '0' selects hex, otherwise decimal */
				s = arg + strlen(s) + 1;
				if (*s == '0') {
					params[i].nvalue = strtol(s, &s, 16);
				} else {
					params[i].nvalue = strtol(s, &s, 10);
				}
				params[i].flags |= P_CHANGED;
				if (i == P_CM) {
					/* "columns" changed: recompute screen
					 * layout, clamped to terminal width */
					if (((COLS - AnzAdd - 1) / 4) >=
					    P(P_CM)) {
						COLUMNS_DATA = P(P_CM);
					} else {
						COLUMNS_DATA = P(P_CM) =
						    ((COLS - AnzAdd - 1) / 4);
					}
					maxx = COLUMNS_DATA * 4 + AnzAdd + 1;
					COLUMNS_HEX = COLUMNS_DATA * 3;
					status = COLUMNS_HEX + COLUMNS_DATA - 17;
					screen = COLUMNS_DATA * (maxy - 1);
					did_window++;
					stuffin("H");	/* set cursor at HOME */
				}
			}
		} else {	/* boolean */
			if (arg[strlen(s)] == '=') {
				emsg("Invalid set of boolean parameter");
				return 1;
			} else {
				params[i].nvalue = state;
				params[i].flags |= P_CHANGED;
			}
		}
	} else {
		emsg("No such option@- `set all' gives all option values");
		return 1;
	}
	if (did_window) {
		maxy = P(P_LI) - 1;
		new_screen();
	}
	return 0;
}
/* show ALL parameters (all == TRUE) or only the changed ones (all == FALSE),
 * paging through wait_return() every screenful */
void showparms(all)
	int all;
{
	struct param *p;
	int n;
	n = 2;
	msg("Parameters:\n");
	for (p = &params[0]; p->fullname[0] != '\0'; p++) {
		if (!all && ((p->flags & P_CHANGED) == 0))
			continue;
		if (p->flags & P_BOOL)
			sprintf(buf, " %s%s\n",
				(p->nvalue ? " " : "no"), p->fullname);
		else if (p->flags & P_TEXT)
			sprintf(buf, " %s=%s\n", p->fullname, p->svalue);
		else
			sprintf(buf, " %s=%ld\n", p->fullname, p->nvalue);
		msg(buf);
		n++;
		/* page break after a windowful of lines */
		if (n == params[P_LI].nvalue) {
			if (wait_return(FALSE))
				return;
			n = 1;
		}
	}
	wait_return(TRUE);
}
/* Save the terminal's current color definitions and the first 7 color
 * pairs so load_orig_palette() can restore them later. */
void save_orig_palette()
{
	int i;
	for (i = 0; colors[i].fullname[0] != '\0'; i++) {
		color_content(colors[i].short_value, &original_colors[i].r, &original_colors[i].g, &original_colors[i].b);
	}
	/* pair 0 is fixed by curses, so start at 1 */
	for (i = 1; i < 8; i++) {
		pair_content(i, &original_colorpairs[i].f, &original_colorpairs[i].b);
	}
}
/* Restore the color definitions and pairs saved by save_orig_palette(). */
void load_orig_palette()
{
	int i;
	for (i = 0; colors[i].fullname[0] != '\0'; i++) {
		init_color(colors[i].short_value, original_colors[i].r, original_colors[i].g, original_colors[i].b);
	}
	/* pair 0 is fixed by curses, so start at 1 */
	for (i = 1; i < 8; i++) {
		init_pair(i, original_colorpairs[i].f, original_colorpairs[i].b);
	}
}
/*
 * Apply the colors[] table to curses. When the terminal supports color
 * redefinition the RGB values are installed via init_color(); otherwise
 * only the predefined curses colors are paired. Pair i+1 holds color i
 * on the background color (entry 0).
 */
void set_palette()
{
	int i;
	if (can_change_color()) {
		for (i = 0; colors[i].fullname[0] != '\0'; i++) {
			if (init_color
			    (colors[i].short_value, C_r(i), C_g(i),
			     C_b(i)) == ERR)
				fprintf(stderr, "Failed to set [%d] color!\n",
					i);
			/* curses guarantees only colors 0..7 everywhere;
			 * anything above is mapped to white */
			if (C_s(i) <= 7) {
				init_pair(i + 1, C_s(i), C_s(0));
			} else {
				colors[i].short_value = COLOR_WHITE;
				init_pair(i + 1, C_s(i), C_s(0));
			}
		}
		/* address column gets a fixed cyan background */
		init_pair(C_AD + 1, C_s(C_AD), COLOR_CYAN);
	} else {	/* if have no support of changing colors */
		for (i = 0; colors[i].fullname[0] != '\0'; i++) {
			if (C_s(i) <= 7) {
				init_pair(i + 1, C_s(i), C_s(0));
			} else {
				colors[i].short_value = COLOR_WHITE;
				init_pair(i + 1, C_s(i), C_s(0));
			}
		}
	}
}
/* reads the init file (.bvirc): executes it line by line through
 * docmdline(), with from_file set so error reporting can tell the
 * difference from interactive input */
int read_rc(fn)
	char *fn;
{
	ffp = fopen(fn, "r");
	if (ffp == NULL)
		return -1;

	from_file = 1;
	for (;;) {
		if (fgets(fbuf, 255, ffp) == NULL)
			break;
		strtok(fbuf, "\n\r");	/* strip trailing newline / CR */
		docmdline(fbuf);
	}
	fclose(ffp);
	from_file = 0;

	return 0;
}
/*
 * Apply a bitwise operation (shift/rotate/and/or/xor/not/neg) to every
 * byte in [start_addr, end_addr]. For shifts/rotates `str` is a count
 * 1..8; otherwise it is a mask given as 8 binary digits, "b"-prefixed
 * binary, "0"-prefixed hex, or decimal (0..255). The affected range is
 * copied to the undo buffer first. Returns 0 on success, 1 on bad input.
 */
int do_logic(mode, str)
	int mode;
	char *str;
{
	int a, b;
	int value;
	size_t n;
	char *err_str = "Invalid value@for bit manipulation";
	if (mode == LSHIFT || mode == RSHIFT || mode == LROTATE
	    || mode == RROTATE) {
		value = atoi(str);
		if (value < 1 || value > 8) {
			emsg(err_str);
			return 1;
		}
	} else {
		/* parse the mask; value is forced to -1 on malformed digits
		 * so the range check below rejects it */
		if (strlen(str) == 8) {
			value = strtol(str, NULL, 2);
			for (n = 0; n < 8; n++) {
				if (str[n] != '0' && str[n] != '1') {
					value = -1;
					break;
				}
			}
		} else if (str[0] == 'b' || str[0] == 'B') {
			value = strtol(str + 1, NULL, 2);
		} else if (str[0] == '0') {
			value = strtol(str, NULL, 16);
			for (n = 0; n < strlen(str); n++) {
				if (!isxdigit(str[n])) {
					value = -1;
					break;
				}
			}
		} else {
			value = atoi(str);
		}
		if (value < 0 || value > 255) {
			emsg(err_str);
			return 1;
		}
	}
	/* snapshot the range for undo */
	if ((undo_count =
	     alloc_buf((off_t) (end_addr - start_addr + 1), &undo_buf))) {
		memcpy(undo_buf, start_addr, undo_count);
	}
	undo_start = start_addr;
	edits = U_EDIT;
	while (start_addr <= end_addr) {
		a = *start_addr;
		a &= 0xff;
		switch (mode) {
		case LSHIFT:
			a <<= value;
			break;
		case RSHIFT:
			a >>= value;
			break;
		case LROTATE:
			/* bits shifted past bit 7 are folded back in from
			 * the low end via the >> 8 copy */
			a <<= value;
			b = a >> 8;
			a |= b;
			break;
		case RROTATE:
			b = a << 8;
			a |= b;
			a >>= value;
			/*
			b = a << (8 - value);
			a >>= value;
			a |= b;
			*/
			break;
		case AND:
			a &= value;
			break;
		case OR:
			a |= value;
			break;
		case XOR:
		case NOT:
			/* NOTE(review): NOT shares the XOR-with-mask path -
			 * presumably callers pass an all-ones mask for NOT;
			 * verify against the command dispatch */
			a ^= value;
			break;
		case NEG:
			a ^= value;
			a++;	/* Is this true */
			break;
		}
		/* only the low byte is written back */
		*start_addr++ = (char)(a & 0xff);
	}
	repaint();
	return (0);
}
/*
 * Apply a bitwise operation to every byte of a marked block.
 * @mode selects the operation (LSHIFT, RSHIFT, LROTATE, RROTATE, AND,
 * OR, XOR, NOT, NEG); @str is the user-supplied operand and
 * @block_number indexes data_block[].
 * Returns 0 on success, 1 on an invalid operand or block.
 *
 * Side effect kept from the original: start_addr/end_addr are re-aimed
 * at the block and left pointing past it when the loop finishes.
 */
int do_logic_block(mode, str, block_number)
int mode;
char *str;
int block_number;
{
	int a, b;
	int value;
	size_t n;
	char *err_str = "Invalid value@for bit manipulation";

	/*
	 * Reject an out-of-range block index or an empty/inverted block.
	 * The original test used bitwise '&', which (a) required BOTH
	 * conditions to hold before rejecting and (b) indexed
	 * data_block[] before the bounds check.  Logical '||' with
	 * short-circuit evaluation fixes both problems.
	 */
	if (block_number >= BLK_COUNT
	    || !(data_block[block_number].pos_start <
		 data_block[block_number].pos_end)) {
		emsg("Invalid block for bit manipulation!");
		return 1;
	}
	/* Shift/rotate counts must be between 1 and 8 bits. */
	if (mode == LSHIFT || mode == RSHIFT || mode == LROTATE
	    || mode == RROTATE) {
		value = atoi(str);
		if (value < 1 || value > 8) {
			emsg(err_str);
			return 1;
		}
	} else {
		/*
		 * Operand byte for the logical ops: 8 binary digits,
		 * 'b'/'B' + binary, leading '0' + hex, or decimal.
		 */
		if (strlen(str) == 8) {
			value = strtol(str, NULL, 2);
			for (n = 0; n < 8; n++) {
				if (str[n] != '0' && str[n] != '1') {
					value = -1;	/* forces range error below */
					break;
				}
			}
		} else if (str[0] == 'b' || str[0] == 'B') {
			value = strtol(str + 1, NULL, 2);
		} else if (str[0] == '0') {
			value = strtol(str, NULL, 16);
			for (n = 0; n < strlen(str); n++) {
				if (!isxdigit(str[n])) {
					value = -1;
					break;
				}
			}
		} else {
			value = atoi(str);
		}
		if (value < 0 || value > 255) {
			emsg(err_str);
			return 1;
		}
	}
	/* Save the block's bytes so the edit can be undone. */
	if ((undo_count =
	     alloc_buf((off_t) (data_block[block_number].pos_end -
				data_block[block_number].pos_start + 1),
		       &undo_buf))) {
		memcpy(undo_buf, start_addr + data_block[block_number].pos_start,
		       undo_count);
	}
	undo_start = start_addr + data_block[block_number].pos_start;
	edits = U_EDIT;
	start_addr = start_addr + data_block[block_number].pos_start;
	end_addr = start_addr + data_block[block_number].pos_end -
	    data_block[block_number].pos_start;
	while (start_addr <= end_addr) {
		a = *start_addr;
		a &= 0xff;	/* work on the byte as an unsigned value */
		switch (mode) {
		case LSHIFT:
			a <<= value;
			break;
		case RSHIFT:
			a >>= value;
			break;
		case LROTATE:
			/* Fold bits 8..15 back into the low byte. */
			a <<= value;
			b = a >> 8;
			a |= b;
			break;
		case RROTATE:
			/* Duplicate the byte into bits 8..15 so low bits
			 * wrap around when shifting right. */
			b = a << 8;
			a |= b;
			a >>= value;
			break;
		case AND:
			a &= value;
			break;
		case OR:
			a |= value;
			break;
		case XOR:
		case NOT:
			a ^= value;
			break;
		case NEG:
			/* Two's-complement negate: invert, then add one. */
			a ^= value;
			a++;
			break;
		}
		*start_addr++ = (char)(a & 0xff);
	}
	repaint();
	return (0);
}
/*
 * Read a command line into @p, echoed at column @x of the status line,
 * or take the next line from the script file when commands come from
 * read_rc().  Returns 0 when a line was read, 1 when the user
 * abandoned the command (ESC, or backspacing past the start).
 *
 * NOTE(review): interactive input is not bounds-checked, and the
 * script-mode path returns without attroff()/signal restore; callers
 * must supply a buffer of at least 256 bytes (the fgets limit) —
 * verify against callers.
 */
int getcmdstr(p, x)
char *p;
int x;
{
	int c;
	int n;
	char *buff, *q;

	attron(COLOR_PAIR(C_CM + 1));
	/* Script mode: lines come from the rc file, not the keyboard. */
	if (from_file) {
		if (fgets(p, 255, ffp) != NULL) {
			strtok(p, "\n\r");
			return 0;
		} else {
			return 1;
		}
	}
	signal(SIGINT, jmpproc);
	buff = p;
	move(maxy, x);
	do {
		switch (c = vgetc()) {
		case BVICTRL('H'):
		case KEY_BACKSPACE:
		case KEY_LEFT:
			if (p > buff) {
				/* Erase one character: redraw the kept
				 * prefix, blank the old last cell. */
				p--;
				move(maxy, x);
				n = x;
				for (q = buff; q < p; q++) {
					addch(*q);
					n++;
				}
				addch(' ');
				move(maxy, n);
			} else {
				/* Backspaced past the start: abandon. */
				*buff = '\0';
				msg("");
				attroff(COLOR_PAIR(C_CM + 1));
				signal(SIGINT, SIG_IGN);
				return 1;
			}
			break;
		case ESC:	/* abandon command */
			*buff = '\0';
			msg("");
			attroff(COLOR_PAIR(C_CM + 1));
			signal(SIGINT, SIG_IGN);
			return 1;
#if NL != KEY_ENTER
		case NL:
#endif
#if CR != KEY_ENTER
		case CR:
#endif
		case KEY_ENTER:
			break;
		default:	/* a normal character */
			addch(c);
			*p++ = c;
			break;
		}
		refresh();
	} while (c != NL && c != CR && c != KEY_ENTER);
	attroff(COLOR_PAIR(C_CM + 1));
	*p = '\0';
	signal(SIGINT, SIG_IGN);
	return 0;
}
| XVilka/bvim | set.c | C | gpl-2.0 | 12,671 |
/**************************************************************
* Copyright (C) 2010 STMicroelectronics. All Rights Reserved.
* This file is part of the latest release of the Multicom4 project. This release
* is fully functional and provides all of the original MME functionality.This
* release is now considered stable and ready for integration with other software
* components.
* Multicom4 is a free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software Foundation
* version 2.
* Multicom4 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License along with Multicom4;
* see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place -
* Suite 330, Boston, MA 02111-1307, USA.
* Written by Multicom team at STMicroelectronics in November 2010.
* Contact multicom.support@st.com.
**************************************************************/
/*
*
*/
/*
* sti7200 ST231 Video1
*/
#include <bsp/_bsp.h>
const char *bsp_cpu_name = "video1";
/*
* Local Variables:
* tab-width: 8
* c-indent-level: 2
* c-basic-offset: 2
* End:
*/
| project-magpie/tdt-driver | multicom-4.0.6/src/bsp/stx7200/st231/video1/name.c | C | gpl-2.0 | 1,425 |
/*
* linux/drivers/mmc/core/core.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
* MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/wakelock.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
static void mmc_clk_scaling(struct mmc_host *host, bool from_wq);
/*
* Background operations can take a long time, depending on the housekeeping
* operations the card has to perform.
*/
#define MMC_BKOPS_MAX_TIMEOUT (30 * 1000) /* max time to wait in ms */
/* Flushing a large amount of cached data may take a long time. */
#define MMC_FLUSH_REQ_TIMEOUT_MS 30000 /* msec */
static struct workqueue_struct *workqueue;
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
* performance cost, and for other reasons may not always be desired.
* So we allow it it to be disabled.
*/
bool use_spi_crc = 0;
/*
 * 0644 (rw-r--r--): execute bits are meaningless on a sysfs parameter
 * file — the previous 0755 would be rejected by kernels that verify
 * module-param permissions and matches no upstream convention.
 */
module_param(use_spi_crc, bool, 0644);
/*
* We normally treat cards as removed during suspend if they are not
* known to be on a non-removable bus, to avoid the risk of writing
* back data to a different card after resume. Allow this to be
* overridden if necessary.
*/
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
removable,
"MMC/SD cards are removable and may be removed during suspend");
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/* Queue @work on the private MMC workqueue after @delay jiffies. */
	return queue_delayed_work(workqueue, work, delay);
}
/*
* Internal function. Flush all scheduled work from the MMC work queue.
*/
static void mmc_flush_scheduled_work(void)
{
	/* Block until every item queued on the MMC workqueue has run. */
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST
/*
* Internal function. Inject random data errors.
* If mmc_data is NULL no errors are injected.
*/
/*
 * Fault injection: randomly corrupt the result of a data request.
 * Picks a random data error and truncates bytes_xfered to a random
 * number of whole 512-byte sectors.  No-op when the request carries no
 * data, already failed, or the fault-injection filter declines.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	unsigned int xfer_sectors;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	/*
	 * Guard the modulo: for transfers shorter than one sector
	 * (bytes_xfered < 512) the old "% (bytes_xfered >> 9)" divided
	 * by zero.  Treat such a transfer as fully lost instead.
	 */
	xfer_sectors = data->bytes_xfered >> 9;
	data->bytes_xfered = xfer_sectors ?
		(random32() % xfer_sectors) << 9 : 0;
	data->fault_injected = true;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
	/* Fault injection compiled out: deliberately a no-op. */
}
#endif /* CONFIG_FAIL_MMC_REQUEST */
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
* @mrq: MMC request which request
*
* MMC drivers should call this function when they have completed
* their processing of a request.
*/
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;
#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t diff;
#endif

	/* Account controller busy time for the clock-scaling heuristic;
	 * start_busy was stamped in mmc_start_request(). */
	if (host->card && host->clk_scaling.enable)
		host->clk_scaling.busy_time_us +=
			ktime_to_us(ktime_sub(ktime_get(),
					host->clk_scaling.start_busy));

	/* SPI: an illegal-command response can never succeed on retry. */
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
#ifdef CONFIG_MMC_PERF_PROFILING
			/* Accumulate per-direction byte and time counters. */
			if (host->perf_enable) {
				diff = ktime_sub(ktime_get(), host->perf.start);
				if (mrq->data->flags == MMC_DATA_READ) {
					host->perf.rbytes_drv +=
						mrq->data->bytes_xfered;
					host->perf.rtime_drv =
						ktime_add(host->perf.rtime_drv,
							diff);
				} else {
					host->perf.wbytes_drv +=
						mrq->data->bytes_xfered;
					host->perf.wtime_drv =
						ktime_add(host->perf.wtime_drv,
							diff);
				}
			}
#endif
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		/* Balances the clk_hold taken in mmc_start_request(). */
		mmc_host_clk_release(host);
	}
}
EXPORT_SYMBOL(mmc_request_done);
/*
 * Hand a fully-described request to the host driver.  Validates the
 * data segment against host limits, clears the error fields, links the
 * sub-commands back to @mrq, and holds the host clock until
 * mmc_request_done() releases it.  The host must be claimed.
 */
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);
#ifdef CONFIG_MMC_DEBUG
		/* Cross-check: sg list length must match blocks * blksz. */
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
#ifdef CONFIG_MMC_PERF_PROFILING
		if (host->perf_enable)
			host->perf.start = ktime_get();
#endif
	}
	/* Released in mmc_request_done(). */
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);

	if (host->card && host->clk_scaling.enable) {
		/*
		 * Check if we need to scale the clocks. Clocks
		 * will be scaled up immediately if necessary
		 * conditions are satisfied. Scaling down the
		 * frequency will be done after current thread
		 * releases host.
		 */
		mmc_clk_scaling(host, false);
		host->clk_scaling.start_busy = ktime_get();
	}

	host->ops->request(host, mrq);
}
/**
* mmc_start_delayed_bkops() - Start a delayed work to check for
* the need of non urgent BKOPS
*
* @card: MMC card to start BKOPS on
*/
void mmc_start_delayed_bkops(struct mmc_card *card)
{
	/* Nothing to do without a card, BKOPS support, or if BKOPS is
	 * already in progress. */
	if (!card || !card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	/* Too little has changed to justify queueing the delayed work. */
	if (card->bkops_info.sectors_changed <
	    card->bkops_info.min_sectors_to_queue_delayed_work)
		return;

	pr_debug("%s: %s: queueing delayed_bkops_work\n",
		 mmc_hostname(card->host), __func__);

	/*
	 * cancel_delayed_bkops_work will prevent a race condition between
	 * fetching a request by the mmcqd and the delayed work, in case
	 * it was removed from the queue work but not started yet
	 */
	card->bkops_info.cancel_delayed_work = false;
	queue_delayed_work(system_nrt_wq, &card->bkops_info.dw,
			   msecs_to_jiffies(
				   card->bkops_info.delay_ms));
}
EXPORT_SYMBOL(mmc_start_delayed_bkops);
/**
* mmc_start_bkops - start BKOPS for supported cards
* @card: MMC card to start BKOPS
* @from_exception: A flag to indicate if this function was
* called due to an exception raised by the card
*
* Start background operations whenever requested.
* When the urgent BKOPS bit is set in a R1 command response
* then background operations should be started immediately.
*/
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;

	BUG_ON(!card);
	if (!card->ext_csd.bkops_en)
		return;

	/* A pending cancellation (see mmc_stop_bkops) overrides a
	 * delayed-work invocation, but never an exception. */
	if ((card->bkops_info.cancel_delayed_work) && !from_exception) {
		pr_debug("%s: %s: cancel_delayed_work was set, exit\n",
			 mmc_hostname(card->host), __func__);
		card->bkops_info.cancel_delayed_work = false;
		return;
	}

	/* In case of delayed bkops we might be in race with suspend. */
	if (!mmc_try_claim_host(card->host))
		return;

	/*
	 * Since the cancel_delayed_work can be changed while we are waiting
	 * for the lock we will to re-check it
	 */
	if ((card->bkops_info.cancel_delayed_work) && !from_exception) {
		pr_debug("%s: %s: cancel_delayed_work was set, exit\n",
			 mmc_hostname(card->host), __func__);
		card->bkops_info.cancel_delayed_work = false;
		goto out;
	}

	if (mmc_card_doing_bkops(card)) {
		pr_debug("%s: %s: already doing bkops, exit\n",
			 mmc_hostname(card->host), __func__);
		goto out;
	}

	/* NOTE(review): when the need-BKOPS flag is already set during an
	 * exception, this exits early without re-marking it — presumably
	 * the pending flag will be serviced later; confirm intent. */
	if (from_exception && mmc_card_need_bkops(card))
		goto out;

	/*
	 * If the need BKOPS flag is set, there is no need to check if BKOPS
	 * is needed since we already know that it does
	 */
	if (!mmc_card_need_bkops(card)) {
		err = mmc_read_bkops_status(card);
		if (err) {
			pr_err("%s: %s: Failed to read bkops status: %d\n",
			       mmc_hostname(card->host), __func__, err);
			goto out;
		}

		if (!card->ext_csd.raw_bkops_status)
			goto out;

		pr_info("%s: %s: raw_bkops_status=0x%x, from_exception=%d\n",
			mmc_hostname(card->host), __func__,
			card->ext_csd.raw_bkops_status,
			from_exception);
	}

	/*
	 * If the function was called due to exception, BKOPS will be performed
	 * after handling the last pending request
	 */
	if (from_exception) {
		pr_debug("%s: %s: Level %d from exception, exit",
			 mmc_hostname(card->host), __func__,
			 card->ext_csd.raw_bkops_status);
		mmc_card_set_need_bkops(card);
		goto out;
	}
	pr_info("%s: %s: Starting bkops\n", mmc_hostname(card->host), __func__);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, 0, false, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}
	mmc_card_clr_need_bkops(card);
	mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
/**
* mmc_start_idle_time_bkops() - check if a non urgent BKOPS is
* needed
* @work: The idle time BKOPS work
*/
void mmc_start_idle_time_bkops(struct work_struct *work)
{
	/* Recover the owning card from the embedded delayed_work. */
	struct mmc_card *card = container_of(work, struct mmc_card,
			bkops_info.dw.work);

	/*
	 * Prevent a race condition between mmc_stop_bkops and the delayed
	 * BKOPS work in case the delayed work is executed on another CPU
	 */
	if (card->bkops_info.cancel_delayed_work)
		return;

	mmc_start_bkops(card, false);
}
EXPORT_SYMBOL(mmc_start_idle_time_bkops);
static void mmc_wait_done(struct mmc_request *mrq)
{
	/* Request-completion callback: wake the thread blocked in
	 * mmc_wait_for_req_done(). */
	complete(&mrq->completion);
}
/*
 * Arm @mrq's completion and submit it.  If the card is gone, mark the
 * command -ENOMEDIUM and complete immediately so a waiter returns at
 * once.  Returns 0 on submission, -ENOMEDIUM otherwise.
 */
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	if (!mmc_card_removed(host->card)) {
		mmc_start_request(host, mrq);
		return 0;
	}

	mrq->cmd->error = -ENOMEDIUM;
	complete(&mrq->completion);
	return -ENOMEDIUM;
}
/*
 * Block until @mrq finishes, transparently retrying failed commands
 * (cmd->retries budget) unless the card is gone or the caller asked to
 * handle HPI-able timeouts itself.
 */
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion_io(&mrq->completion);

		cmd = mrq->cmd;
		/*
		 * If host has timed out waiting for the commands which can be
		 * HPIed then let the caller handle the timeout error as it may
		 * want to send the HPI command to bring the card out of
		 * programming state.
		 */
		if (cmd->ignore_timeout && cmd->error == -ETIMEDOUT)
			break;

		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		/* Resubmit directly; completion is re-armed by the driver
		 * calling mmc_request_done() again. */
		host->ops->request(host, mrq);
	}
}
/**
* mmc_pre_req - Prepare for a new request
* @host: MMC host to prepare command
* @mrq: MMC request to prepare for
* @is_first_req: true if there is no previous started request
* that may run in parellel to this call, otherwise false
*
* mmc_pre_req() is called in prior to mmc_start_req() to let
* host prepare for the new request. Preparation of a request may be
* performed while another request is running on the host.
*/
/*
 * Give the host driver a chance to prepare @mrq (e.g. DMA mapping)
 * while another request may still be running.  No-op when the driver
 * provides no pre_req hook.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
		 bool is_first_req)
{
	if (!host->ops->pre_req)
		return;

	mmc_host_clk_hold(host);
	host->ops->pre_req(host, mrq, is_first_req);
	mmc_host_clk_release(host);
}
/**
* mmc_post_req - Post process a completed request
* @host: MMC host to post process command
* @mrq: MMC request to post process for
* @err: Error, if non zero, clean up any resources made in pre_req
*
* Let the host post process a completed request. Post processing of
* a request may be performed while another reuqest is running.
*/
/*
 * Let the host driver post-process a completed @mrq; a non-zero @err
 * tells it to undo whatever pre_req set up.  No-op when the driver
 * provides no post_req hook.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (!host->ops->post_req)
		return;

	mmc_host_clk_hold(host);
	host->ops->post_req(host, mrq, err);
	mmc_host_clk_release(host);
}
/**
* mmc_start_req - start a non-blocking request
* @host: MMC host to start command
* @areq: async request to start
* @error: out parameter returns 0 for success, otherwise non zero
*
* Start a new MMC custom command request for a host.
* If there is on ongoing async request wait for completion
* of that request and start the new one and return.
* Does not wait for the new request to complete.
*
* Returns the completed request, NULL in case of none completed.
* Wait for the an ongoing request (previoulsy started) to complete and
* return the completed request. If there is no ongoing request, NULL
* is returned without waiting. NULL is not an error condition.
*/
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	/* The request that was in flight when we were called (may be NULL). */
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	/* Finish the previously started request before launching the new one. */
	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	/* Only start the new request if the old one checked out clean. */
	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
*
* Start a new MMC custom command request for a host, and wait
* for the command to complete. Does not attempt to parse the
* response.
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	/*
	 * The return value is deliberately ignored: on -ENOMEDIUM,
	 * __mmc_start_req() already sets cmd->error and completes the
	 * request, so the wait below returns immediately.
	 */
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
/*
 * Check whether the card is currently in the PRG (programming) state
 * by issuing SEND_STATUS.  Returns true only when the state is
 * confirmed PRG; false otherwise, including when the status command
 * itself fails.
 *
 * Fix: the command result used to be stored straight into the bool
 * return value, which truncated the error code (the rc=%d log always
 * printed 1) — keep the int error separate from the boolean answer.
 */
bool mmc_card_is_prog_state(struct mmc_card *card)
{
	bool rc = false;
	struct mmc_command cmd;
	int err;

	mmc_claim_host(card->host);
	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("%s: Get card status fail. rc=%d\n",
		       mmc_hostname(card->host), err);
		goto out;
	}

	rc = (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	mmc_release_host(card->host);
	return rc;
}
EXPORT_SYMBOL(mmc_card_is_prog_state);
/**
* mmc_interrupt_hpi - Issue for High priority Interrupt
* @card: the MMC card associated with the HPI transfer
*
* Issued High Priority Interrupt, and check for card status
* until out-of prg-state.
*/
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	/* HPI must be enabled in EXT_CSD before it can be issued. */
	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
* mmc_wait_for_cmd - start a command and wait for completion
* @host: MMC host to start command
* @cmd: MMC command to start
* @retries: maximum number of retries
*
* Start a new MMC command for a host, and wait for the command
* to complete. Return any error that occurred while the command
* was executing. Do not attempt to parse the response.
*/
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	/* Wrap the bare command in a data-less request. */
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	/* Start from a clean response; retries are consumed by
	 * mmc_wait_for_req_done(). */
	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
* mmc_stop_bkops - stop ongoing BKOPS
* @card: MMC card to check BKOPS
*
* Send HPI command to stop ongoing background operations to
* allow rapid servicing of foreground operations, e.g. read/
* writes. Wait until the card comes out of the programming state
* to avoid errors in servicing read/write requests.
*
* The function should be called with host claimed.
*/
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);

	/*
	 * Notify the delayed work to be cancelled, in case it was already
	 * removed from the queue, but was not started yet
	 */
	card->bkops_info.cancel_delayed_work = true;
	if (delayed_work_pending(&card->bkops_info.dw))
		cancel_delayed_work_sync(&card->bkops_info.dw);
	if (!mmc_card_doing_bkops(card))
		goto out;

	/*
	 * If idle time bkops is running on the card, let's not get into
	 * suspend.
	 */
	if (mmc_card_doing_bkops(card)
		&& (card->host->parent->power.runtime_status == RPM_SUSPENDING)
		&& mmc_card_is_prog_state(card)) {
		err = -EBUSY;
		goto out;
	}

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is EINVAL, we can't issue an HPI.
	 * It should complete the BKOPS.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}
out:
	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
/*
 * Refresh the cached BKOPS and exception-event status bytes from the
 * card's EXT_CSD register.  Returns 0 on success or a negative errno.
 */
int mmc_read_bkops_status(struct mmc_card *card)
{
	u8 *csd_buf;
	int ret;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	csd_buf = kmalloc(512, GFP_KERNEL);
	if (!csd_buf) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	ret = mmc_send_ext_csd(card, csd_buf);
	mmc_release_host(card->host);

	if (!ret) {
		card->ext_csd.raw_bkops_status = csd_buf[EXT_CSD_BKOPS_STATUS];
		card->ext_csd.raw_exception_status =
			csd_buf[EXT_CSD_EXP_EVENTS_STATUS];
	}

	kfree(csd_buf);
	return ret;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
* mmc_set_data_timeout - set the timeout for a data command
* @data: data phase for command
* @card: the MMC card associated with the data transfer
*
* Computes the data timeout parameters according to the
* correct algorithm given the card type.
*/
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	/* Base timeout from the CSD access-time fields. */
	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		/* Convert to microseconds, folding in the clock-count part. */
		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
	/* Increase the timeout values for some bad INAND MCP devices */
	if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
		data->timeout_ns = 4000000000u; /* 4s */
		data->timeout_clks = 0;
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
* mmc_align_data_size - pads a transfer size to a more optimal value
* @card: the MMC card associated with the data transfer
* @sz: original transfer size
*
* Pads the original data size with a number of extra bytes in
* order to avoid controller bugs and/or performance hits
* (e.g. some controllers revert to PIO for certain sizes).
*
* Returns the improved size, which might be unmodified.
*
* Note that this function is only relevant when issuing a
* single scatter gather entry.
*/
/*
 * Round @sz up to the next multiple of four bytes.
 *
 * FIXME: We don't have a system for the controller to tell
 * the core about its problems yet, so for now we just 32-bit
 * align the size.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	return (sz + 3) & ~3u;
}
EXPORT_SYMBOL(mmc_align_data_size);
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
* @abort: whether or not the operation should be aborted
*
* Claim a host for a set of operations. If @abort is non null and
* dereference a non-zero value then this will return prematurely with
* that non-zero value without acquiring the lock. Returns zero
* with the lock held otherwise.
*/
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		/* Sleep until the host is free, already ours (nested
		 * claim), or the caller's abort flag fires. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		/* Aborted: pass the wakeup on to the next waiter. */
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	/* Power up the host only on the first (outermost) claim. */
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
* mmc_try_claim_host - try exclusively to claim a host
* @host: mmc host to claim
*
* Returns %1 if the host is claimed, %0 otherwise.
*/
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	/* Succeeds when the host is free or already claimed by us
	 * (nested claim) — never blocks. */
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	/* Power up the host only on the first (outermost) claim. */
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
* mmc_release_host - release a host
* @host: mmc host to release
*
* Release a MMC host, allowing others to claim the host
* for their operations.
*/
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	/* Power down only when the outermost claim is being dropped. */
	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		/* Let the next waiter in __mmc_claim_host() run. */
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
/*
* Internal function that does the actual ios call to the host driver,
* optionally printing some debug output.
*/
void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;
	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);
	/* A non-zero clock implicitly ungates the host clock state. */
	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
	/* Trace clock-rate changes for debugging; clk_ts==0 means the
	 * rate was never set before, so skip the first transition.
	 */
	if (ios->old_rate != ios->clock) {
		if (likely(ios->clk_ts)) {
			char trace_info[80];
			snprintf(trace_info, 80,
				"%s: freq_KHz %d --> %d | t = %d",
				mmc_hostname(host), ios->old_rate / 1000,
				ios->clock / 1000, jiffies_to_msecs(
					(long)jiffies - (long)ios->clk_ts));
			trace_mmc_clk(trace_info);
		}
		ios->old_rate = ios->clock;
		ios->clk_ts = jiffies;
	}
}
EXPORT_SYMBOL(mmc_set_ios);
/*
* Control chip select pin on a host.
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	/* Pin the (possibly gated) host clock while updating ios. */
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);
	/* Clamp to the host controller's maximum supported frequency. */
	if (hz > host->f_max)
		hz = host->f_max;
	host->ios.clock = hz;
	mmc_set_ios(host);
}
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	/* Public wrapper: pin the host clock around the rate change. */
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
* This gates the clock by setting it to 0 Hz.
*/
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;
	WARN_ON(!host->ios.clock);
	spin_lock_irqsave(&host->clk_lock, flags);
	/* Remember the running rate so mmc_ungate_clock() can restore it. */
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	/* Push the zero clock to the controller outside the spinlock. */
	mmc_set_ios(host);
}
/*
* This restores the clock from gating by using the cached
* clock value.
*/
void mmc_ungate_clock(struct mmc_host *host)
{
	/* Restore the pre-gating clock rate saved by mmc_gate_clock(). */
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		WARN_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}
void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;
	/* Called from mmc_set_ios() whenever a non-zero clock is set. */
	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
#else
void mmc_set_ungated(struct mmc_host *host)
{
	/* No-op when clock gating (CONFIG_MMC_CLKGATE) is disabled. */
}
#endif
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	/* Pin the host clock while updating ios. */
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
* Change data bus width of a host.
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	/* Pin the host clock while updating ios. */
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/**
* mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
* @vdd: voltage (mV)
* @low_bits: prefer low bits in boundary cases
*
* This function returns the OCR bit number according to the provided @vdd
* value. If conversion is not possible a negative errno value returned.
*
* Depending on the @low_bits flag the function prefers low or high OCR bits
* on boundary voltages. For example,
* with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
* with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
*
* Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
*/
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	int ocr_bit;

	/* Reject voltages outside the 1.65V..3.6V OCR range. */
	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	/* The whole 1.65V..1.95V band maps onto a single OCR bit. */
	if (vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	/* On an exact boundary, stepping down 1mV selects the lower bit. */
	if (low_bits)
		vdd -= 1;

	/* 100mV per bit, starting at 2000mV == bit 8. */
	ocr_bit = 8 + (vdd - 2000) / 100;
	if (ocr_bit > ilog2(MMC_VDD_35_36))
		ocr_bit = ilog2(MMC_VDD_35_36);

	return ocr_bit;
}
/**
* mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
* @vdd_min: minimum voltage value (mV)
* @vdd_max: maximum voltage value (mV)
*
* This function returns the OCR mask bits according to the provided @vdd_min
* and @vdd_max values. If conversion is not possible the function returns 0.
*
* Notes wrt boundary cases:
* This function sets the OCR bits for all boundary voltages, for example
* [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
* MMC_VDD_34_35 mask.
*/
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	int bit_min, bit_max;
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer the high bit on the upper boundary voltage. */
	bit_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (bit_max < 0)
		return 0;

	/* Prefer the low bit on the lower boundary voltage. */
	bit_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (bit_min < 0)
		return 0;

	/* Set every OCR bit between the two boundary bits, inclusive. */
	for (; bit_min <= bit_max; bit_min++)
		mask |= 1 << bit_min;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
#ifdef CONFIG_REGULATOR
/**
* mmc_regulator_get_ocrmask - return mask of supported voltages
* @supply: regulator to use
*
* This returns either a negative errno, or a mask of voltages that
* can be provided to MMC/SD/SDIO devices using the specified voltage
* regulator. This would normally be called before registering the
* MMC host adapter.
*/
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;
	/* OR together the OCR bit(s) for every discrete voltage the
	 * regulator can produce; non-positive entries are skipped.
	 */
	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;
		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}
	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
* @mmc: the host to regulate
* @supply: regulator to use
* @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
*
* Returns zero on success, else negative errno.
*
* MMC host drivers may use this to enable or disable a regulator using
* a particular supply voltage. This would normally be called from the
* set_ios() method.
*/
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;
	if (vdd_bit) {
		int tmp;
		int voltage;
		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			/* OCR bits above 165_195 step in 100mV from 2.0V. */
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}
		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		/* Broken-voltage hosts keep whatever is currently set. */
		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;
		/* Enable the regulator the first time a voltage is applied. */
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		/* vdd_bit == 0 requests power off. */
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}
	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
#endif /* CONFIG_REGULATOR */
/*
* Mask off any voltages we don't support and select
* the lowest voltage
*/
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;
	/* Keep only the voltages both card and host support. */
	ocr &= host->ocr_avail;
	bit = ffs(ocr);
	if (bit) {
		bit -= 1;
		/* Keep the lowest supported bit and its next neighbour. */
		ocr &= 3 << bit;
		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}
	return ocr;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;
	BUG_ON(!host);
	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;
		/* A set R1 error bit means the card refused the switch. */
		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}
	host->ios.signal_voltage = signal_voltage;
	/* Let the host driver perform the actual electrical switch. */
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}
	return err;
}
/*
* Select timing parameters for host.
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	/* Pin the host clock while updating ios. */
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
* Select appropriate driver type for host.
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	/* Pin the host clock while updating ios. */
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
* We then wait a bit for the power to stabilise. Finally,
* enable the bus drivers and clock to the card.
*
* We must _NOT_ enable the clock prior to power stablising.
*
* If a host does all the power sequencing itself, ignore the
* initial MMC_POWER_UP stage.
*/
void mmc_power_up(struct mmc_host *host)
{
	int bit;
	mmc_host_clk_hold(host);
	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;
	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	/* Stage 1: apply power with the clock still off. */
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);
	/* Stage 2: start the clock at the probe frequency. */
	host->ios.clock = host->f_init;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);
	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
	mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
{
	/* Power down the card and reset bus state for the next power up. */
	mmc_host_clk_hold(host);
	host->ios.clock = 0;
	host->ios.vdd = 0;
	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);
	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
	mmc_host_clk_release(host);
}
/*
* Cleanup when the last reference to the bus operator is dropped.
*/
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);
	/* Caller (mmc_bus_put) holds host->lock; just drop the ops. */
	host->bus_ops = NULL;
}
/*
* Increase reference count of bus operator
*/
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;
	/* bus_refs is protected by host->lock. */
	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Decrease reference count of bus operator and free it if
* it is the last reference.
*/
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	/* Last reference with a live handler frees it (under the lock). */
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Perform a deferred resume of the bus: power the host back up, let the
 * bus handler resume the card and re-run card detection.
 *
 * Returns 0 on success or -EINVAL if no deferred resume is pending.
 */
int mmc_resume_bus(struct mmc_host *host)
{
	unsigned long flags;

	if (!mmc_bus_needs_resume(host))
		return -EINVAL;

	printk("%s: Starting deferred resume\n", mmc_hostname(host));
	spin_lock_irqsave(&host->lock, flags);
	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
	host->rescan_disable = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}

	/*
	 * Fix: check host->bus_ops before dereferencing ->detect.  The
	 * previous code only guarded the resume branch above and would
	 * oops here when no bus handler was attached.
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);
	mmc_bus_put(host);

	printk("%s: Deferred resume completed\n", mmc_hostname(host));
	return 0;
}
EXPORT_SYMBOL(mmc_resume_bus);
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
*/
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;
	BUG_ON(!host);
	BUG_ON(!ops);
	WARN_ON(!host->claimed);
	spin_lock_irqsave(&host->lock, flags);
	/* A live bus handler must not already be installed. */
	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);
	host->bus_ops = ops;
	/* Initial reference; dropped by mmc_detach_bus(). */
	host->bus_refs = 1;
	host->bus_dead = 0;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Remove the current bus handler from a host.
*/
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;
	BUG_ON(!host);
	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);
	spin_lock_irqsave(&host->lock, flags);
	/* Mark dead first; the ops pointer is cleared when the last
	 * reference is put (see __mmc_release_bus()).
	 */
	host->bus_dead = 1;
	spin_unlock_irqrestore(&host->lock, flags);
	mmc_bus_put(host);
}
/**
* mmc_detect_change - process change of state on a MMC socket
* @host: host which changed state.
* @delay: optional delay to wait before detection (jiffies)
*
* MMC drivers should call this when they detect a card has been
* inserted or removed. The MMC layer will confirm that any
* present card is still functional, and initialize any newly
* inserted.
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	/* Keep the system awake until the detect work has run. */
	wake_lock(&host->detect_wake_lock);
	mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;
	/* Derive erase_shift and the preferred erase size for the card. */
	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;
	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		/* sz: 512-byte sectors >> 11, i.e. card size in MiB. */
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			/* Round pref_erase up to a multiple of erase_size. */
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                                          unsigned int arg, unsigned int qty)
{
	/* Estimate the erase/trim/discard timeout, in ms, for MMC cards. */
	unsigned int erase_timeout;
	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;
		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;
		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);
		erase_timeout = timeout_us / 1000;
		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}
	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}
	/* Scale by the number of erase groups affected. */
	erase_timeout *= qty;
	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;
	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int timeout_ms;

	if (card->ssr.erase_timeout)
		/* SSR supplies a per-AU erase timeout plus a fixed offset. */
		timeout_ms = qty * card->ssr.erase_timeout +
			     card->ssr.erase_offset;
	else
		/* No SSR figure available: assume 250ms per write block. */
		timeout_ms = qty * 250;

	/* Never report less than a 1 second timeout. */
	return (timeout_ms < 1000) ? 1000 : timeout_ms;
}
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	/* Dispatch on card type: SD and MMC use different timeout rules. */
	return mmc_card_sd(card) ? mmc_sd_erase_timeout(card, arg, qty)
				 : mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	int err;
	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;
	/* Byte-addressed cards take byte offsets, not sector numbers. */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}
	/* Step 1: set the erase start address. */
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}
	memset(&cmd, 0, sizeof(struct mmc_command));
	/* Step 2: set the erase end address. */
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}
	memset(&cmd, 0, sizeof(struct mmc_command));
	/* Step 3: issue the erase itself with the computed timeout. */
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}
	if (mmc_host_is_spi(card->host))
		goto out;
	/* Step 4: poll SEND_STATUS until the card leaves program state. */
	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	return err;
}
/**
* mmc_erase - erase sectors.
* @card: card to erase
* @from: first sector to erase
* @nr: number of sectors to erase
* @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
*
* Caller must claim host before calling this function.
*/
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	/*
	 * Fix: 'to' was previously also initialised at its declaration,
	 * before 'from'/'nr' were adjusted below; that dead store is
	 * removed and 'to' is computed once, after alignment.
	 */
	unsigned int rem, to;

	/* Host and card must both advertise erase capability. */
	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;
	if (!card->erase_size)
		return -EOPNOTSUPP;
	/* SD cards only understand the plain erase argument. */
	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;
	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;
	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;
	/* Secure erase must cover whole erase groups exactly. */
	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}
	/* Plain erase: shrink the range inward to erase-group alignment. */
	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}
	if (nr == 0)
		return 0;
	to = from + nr;
	if (to <= from)
		return -EINVAL;
	/* 'from' and 'to' are inclusive */
	to -= 1;
	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
	/* Host caps, command class and erase size must all allow erase. */
	if (!(card->host->caps & MMC_CAP_ERASE))
		return 0;
	if (!(card->csd.cmdclass & CCC_ERASE))
		return 0;
	return card->erase_size ? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_erase);
int mmc_can_trim(struct mmc_card *card)
{
	/* TRIM support is advertised via the GB_CL_EN security feature. */
	return (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_trim);
int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support filed.
	 */
	return (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) ? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_discard);
int mmc_can_sanitize(struct mmc_card *card)
{
	/* Sanitize only makes sense when the card can trim or erase. */
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	return (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	/* Secure erase/trim needs the ER_EN security feature bit. */
	return (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		? 1 : 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	unsigned int group_sz = card->erase_size;

	/* Both start and length must be whole multiples of the group size. */
	if (!group_sz)
		return 0;
	return (from % group_sz == 0 && nr % group_sz == 0) ? 1 : 0;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;
	/* Bound qty so the conversion to sectors below cannot overflow. */
	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;
	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		/* Doubling search: grow x while the timeout still fits and
		 * does not regress (timeouts can wrap/saturate).
		 */
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);
	if (!qty)
		return 0;
	if (qty == 1)
		return 1;
	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;
	return max_discard;
}
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;
	/* No host timeout limit means no limit on discard size. */
	if (!host->max_discard_to)
		return UINT_MAX;
	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;
	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	/* When trim is available, the limit is the smaller of the two. */
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};
	/* Block-addressed and DDR-mode cards use a fixed block length. */
	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	/* Allow up to 5 retries for this command. */
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	/* Only reset when the host both advertises and implements it. */
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}
int mmc_can_reset(struct mmc_card *card)
{
	/* SDIO cards are never hardware-resettable this way. */
	if (mmc_card_sdio(card))
		return 0;

	/* For eMMC, the RST_n function must be enabled in EXT_CSD. */
	if (mmc_card_mmc(card) &&
	    (card->ext_csd.rst_n_function & EXT_CSD_RST_N_EN_MASK) !=
	    EXT_CSD_RST_N_ENABLED)
		return 0;

	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;
	/* The bus handler must be able to reinitialise the card afterwards. */
	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;
	if (!card)
		return -EINVAL;
	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;
	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);
	host->ops->hw_reset(host);
	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;
		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			/* Card still answered: the reset did not happen. */
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}
	/* Drop to a safe legacy bus configuration before re-probing. */
	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
	/* Let the bus handler power up and reinitialise the card. */
	return host->bus_ops->power_restore(host);
}
int mmc_hw_reset(struct mmc_host *host)
{
	/* Hardware-reset the card without verifying that it reset. */
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);
int mmc_hw_reset_check(struct mmc_host *host)
{
	/* Hardware-reset the card and verify (via SEND_STATUS) it reset. */
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
/**
* mmc_reset_clk_scale_stats() - reset clock scaling statistics
* @host: pointer to mmc host structure
*/
void mmc_reset_clk_scale_stats(struct mmc_host *host)
{
	/* Zero the busy-time accumulator and restart the sampling window. */
	host->clk_scaling.busy_time_us = 0;
	host->clk_scaling.window_time = jiffies;
}
EXPORT_SYMBOL_GPL(mmc_reset_clk_scale_stats);
/**
* mmc_get_max_frequency() - get max. frequency supported
* @host: pointer to mmc host structure
*
* Returns max. frequency supported by card/host. If the
* timing mode is SDR50/SDR104/HS200/DDR50 return appropriate
* max. frequency in these modes else, use the current frequency.
* Also, allow host drivers to overwrite the frequency in case
* they support "get_max_frequency" host ops.
*/
unsigned long mmc_get_max_frequency(struct mmc_host *host)
{
	unsigned long freq;
	/* A host driver override takes precedence over the timing mode. */
	if (host->ops && host->ops->get_max_frequency) {
		freq = host->ops->get_max_frequency(host);
		goto out;
	}
	switch (host->ios.timing) {
	case MMC_TIMING_UHS_SDR50:
		freq = UHS_SDR50_MAX_DTR;
		break;
	case MMC_TIMING_UHS_SDR104:
		freq = UHS_SDR104_MAX_DTR;
		break;
	case MMC_TIMING_MMC_HS200:
		freq = MMC_HS200_MAX_DTR;
		break;
	case MMC_TIMING_UHS_DDR50:
		freq = UHS_DDR50_MAX_DTR;
		break;
	default:
		/* Unknown mode: fall back to the currently set clock. */
		mmc_host_clk_hold(host);
		freq = host->ios.clock;
		mmc_host_clk_release(host);
		break;
	}
out:
	return freq;
}
EXPORT_SYMBOL_GPL(mmc_get_max_frequency);
/**
* mmc_get_min_frequency() - get min. frequency supported
* @host: pointer to mmc host structure
*
* Returns min. frequency supported by card/host which doesn't impair
* performance for most usecases. If the timing mode is SDR50/SDR104/HS200
* return 50MHz value. If timing mode is DDR50 return 25MHz so that
* throughput would be equivalent to SDR50/SDR104 in 50MHz. Also, allow
* host drivers to overwrite the frequency in case they support
* "get_min_frequency" host ops.
*/
static unsigned long mmc_get_min_frequency(struct mmc_host *host)
{
	unsigned long freq;
	/* A host driver override takes precedence over the timing mode. */
	if (host->ops && host->ops->get_min_frequency) {
		freq = host->ops->get_min_frequency(host);
		goto out;
	}
	switch (host->ios.timing) {
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
		freq = UHS_SDR25_MAX_DTR;
		break;
	case MMC_TIMING_MMC_HS200:
		freq = MMC_HIGH_52_MAX_DTR;
		break;
	case MMC_TIMING_UHS_DDR50:
		/* DDR at half clock matches SDR50/SDR104 throughput. */
		freq = UHS_DDR50_MAX_DTR / 2;
		break;
	default:
		/* Unknown mode: fall back to the currently set clock. */
		mmc_host_clk_hold(host);
		freq = host->ios.clock;
		mmc_host_clk_release(host);
		break;
	}
out:
	return freq;
}
/*
* Scale down clocks to minimum frequency supported.
* The delayed work re-arms itself in case it cannot
* claim the host.
*/
static void mmc_clk_scale_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
			clk_scaling.work.work);
	/* Bail out if scaling is disabled or the clock is already gated. */
	if (!host->card || !host->bus_ops ||
			!host->bus_ops->change_bus_speed ||
			!host->clk_scaling.enable || !host->ios.clock)
		goto out;
	if (!mmc_try_claim_host(host)) {
		/* retry after a timer tick */
		queue_delayed_work(system_nrt_wq, &host->clk_scaling.work, 1);
		goto out;
	}
	mmc_clk_scaling(host, true);
	mmc_release_host(host);
out:
	return;
}
/*
 * Return true when the card is in a state where the bus speed may be
 * changed safely.
 * NOTE(review): "vaild" is a typo for "valid"; the name is kept so the
 * in-file callers continue to work.
 */
static bool mmc_is_vaild_state_for_clk_scaling(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	u32 status;
	bool ret = false;
	if (!card)
		goto out;
	if (mmc_send_status(card, &status)) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}
	/* Only the TRAN (transfer) state is accepted here. */
	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_TRAN:
		ret = true;
		break;
	default:
		break;
	}
out:
	return ret;
}
static int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state)
{
	int err = 0;
	/* Tell the host driver about the upcoming load change first. */
	if (host->ops->notify_load) {
		err = host->ops->notify_load(host, state);
		if (err)
			goto out;
	}
	if (freq != host->clk_scaling.curr_freq) {
		/* Only switch speed while the card is in a safe state. */
		if (!mmc_is_vaild_state_for_clk_scaling(host)) {
			err = -EAGAIN;
			goto error;
		}
		err = host->bus_ops->change_bus_speed(host, &freq);
		if (!err)
			host->clk_scaling.curr_freq = freq;
		else
			pr_err("%s: %s: failed (%d) at freq=%lu\n",
				mmc_hostname(host), __func__, err, freq);
	}
error:
	if (err) {
		/* restore previous state */
		if (host->ops->notify_load)
			host->ops->notify_load(host, host->clk_scaling.state);
	}
out:
	return err;
}
/**
 * mmc_clk_scaling() - clock scaling decision algorithm
 * @host:	pointer to mmc host structure
 * @from_wq:	variable that specifies the context in which
 *		mmc_clk_scaling() is called.
 *
 * Calculate load percentage based on host busy time
 * and total sampling interval and decide clock scaling
 * based on scale up/down thresholds.
 * If load is greater than up threshold increase the
 * frequency to maximum as supported by host. Else,
 * if load is less than down threshold, scale down the
 * frequency to minimum supported by the host. Otherwise,
 * retain current frequency and do nothing.
 */
static void mmc_clk_scaling(struct mmc_host *host, bool from_wq)
{
	int err = 0;
	struct mmc_card *card = host->card;
	unsigned long total_time_ms = 0;
	unsigned long busy_time_ms = 0;
	unsigned long freq;
	unsigned int up_threshold = host->clk_scaling.up_threshold;
	unsigned int down_threshold = host->clk_scaling.down_threshold;
	bool queue_scale_down_work = false;
	enum mmc_load state;

	if (!card || !host->bus_ops || !host->bus_ops->change_bus_speed) {
		pr_err("%s: %s: invalid entry\n", mmc_hostname(host), __func__);
		goto out;
	}

	/* Check if the clocks are already gated. */
	if (!host->ios.clock)
		goto out;

	/* Sampling window has not elapsed yet: keep the current frequency. */
	if (time_is_after_jiffies(host->clk_scaling.window_time +
			msecs_to_jiffies(host->clk_scaling.polling_delay_ms)))
		goto out;

	/* handle time wrap */
	total_time_ms = jiffies_to_msecs((long)jiffies -
			(long)host->clk_scaling.window_time);

	/* Check if we re-enter during clock switching */
	if (unlikely(host->clk_scaling.in_progress))
		goto out;

	host->clk_scaling.in_progress = true;

	busy_time_ms = host->clk_scaling.busy_time_us / USEC_PER_MSEC;

	freq = host->clk_scaling.curr_freq;
	state = host->clk_scaling.state;

	/*
	 * Note that the max. and min. frequency should be based
	 * on the timing modes that the card and host handshake
	 * during initialization.
	 */
	if ((busy_time_ms * 100 > total_time_ms * up_threshold)) {
		freq = mmc_get_max_frequency(host);
		state = MMC_LOAD_HIGH;
	} else if ((busy_time_ms * 100 < total_time_ms * down_threshold)) {
		/*
		 * When not called from the workqueue the host is claimed;
		 * defer the scale-down to the workqueue (see below).
		 */
		if (!from_wq)
			queue_scale_down_work = true;
		freq = mmc_get_min_frequency(host);
		state = MMC_LOAD_LOW;
	}

	if (state != host->clk_scaling.state) {
		if (!queue_scale_down_work) {
			if (!from_wq)
				cancel_delayed_work_sync(
						&host->clk_scaling.work);
			err = mmc_clk_update_freq(host, freq, state);
			if (!err)
				host->clk_scaling.state = state;
			else if (err == -EAGAIN)
				goto no_reset_stats;
		} else {
			/*
			 * We hold claim host while queueing the scale down
			 * work, so delay atleast one timer tick to release
			 * host and re-claim while scaling down the clocks.
			 */
			queue_delayed_work(system_nrt_wq,
					&host->clk_scaling.work, 1);
			goto no_reset_stats;
		}
	}

	mmc_reset_clk_scale_stats(host);
no_reset_stats:
	host->clk_scaling.in_progress = false;
out:
	return;
}
/**
 * mmc_disable_clk_scaling() - Disable clock scaling
 * @host: pointer to mmc host structure
 *
 * Disables clock scaling temporarily by setting enable
 * property to false. To disable completely, one also
 * need to set 'initialized' variable to false.
 */
void mmc_disable_clk_scaling(struct mmc_host *host)
{
	/* Ensure no scaling work runs after the flag is cleared. */
	cancel_delayed_work_sync(&host->clk_scaling.work);
	host->clk_scaling.enable = false;
}
EXPORT_SYMBOL_GPL(mmc_disable_clk_scaling);
/**
 * mmc_can_scale_clk() - Check if clock scaling is initialized
 * @host: pointer to mmc host structure
 *
 * Returns true when mmc_init_clk_scaling() has run for this host.
 */
bool mmc_can_scale_clk(struct mmc_host *host)
{
	return host->clk_scaling.initialized;
}
EXPORT_SYMBOL_GPL(mmc_can_scale_clk);
/**
 * mmc_init_clk_scaling() - Initialize clock scaling
 * @host: pointer to mmc host structure
 *
 * Initialize clock scaling for supported hosts.
 * It is assumed that the caller ensure clock is
 * running at maximum possible frequency before
 * calling this function.
 */
void mmc_init_clk_scaling(struct mmc_host *host)
{
	/* Only hosts with a card and the CLK_SCALE capability participate. */
	if (!host->card || !(host->caps2 & MMC_CAP2_CLK_SCALE))
		return;

	INIT_DELAYED_WORK(&host->clk_scaling.work, mmc_clk_scale_work);
	/* Caller guarantees the clock is already at maximum (see above). */
	host->clk_scaling.curr_freq = mmc_get_max_frequency(host);
	if (host->ops->notify_load)
		host->ops->notify_load(host, MMC_LOAD_HIGH);
	host->clk_scaling.state = MMC_LOAD_HIGH;
	mmc_reset_clk_scale_stats(host);
	host->clk_scaling.enable = true;
	host->clk_scaling.initialized = true;
	pr_debug("%s: clk scaling enabled\n", mmc_hostname(host));
}
EXPORT_SYMBOL_GPL(mmc_init_clk_scaling);
/**
 * mmc_exit_clk_scaling() - Disable clock scaling
 * @host: pointer to mmc host structure
 *
 * Disable clock scaling permanently.
 */
void mmc_exit_clk_scaling(struct mmc_host *host)
{
	cancel_delayed_work_sync(&host->clk_scaling.work);
	/* Wipe all state, including 'enable' and 'initialized'. */
	memset(&host->clk_scaling, 0, sizeof(host->clk_scaling));
}
EXPORT_SYMBOL_GPL(mmc_exit_clk_scaling);
/*
 * mmc_rescan_try_freq() - attempt card initialization at @freq.
 *
 * Powers the host up at @freq and probes, in order, SDIO, then SD, then
 * MMC (the order matters: each attach probe rejects the other card types).
 * Returns 0 as soon as one attach succeeds; otherwise powers the host
 * back off and returns -EIO.
 */
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/* Initialization should be done at 3.3 V I/O voltage. */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	/* A failed attach may have powered the host down; restore it. */
	if (!host->ios.vdd)
		mmc_power_up(host);
	if (!mmc_attach_sd(host))
		return 0;
	if (!host->ios.vdd)
		mmc_power_up(host);
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
/*
 * _mmc_detect_card_removed() - poll the bus driver for card presence.
 *
 * Returns non-zero when the card is gone (or already marked removed),
 * 0 when it is still present or the slot is non-removable.  On a newly
 * detected removal the card is flagged via mmc_card_set_removed().
 */
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int gone;

	/* Non-removable slots, or buses without an alive() hook: nothing to do. */
	if (host->caps & MMC_CAP_NONREMOVABLE)
		return 0;
	if (!host->bus_ops->alive)
		return 0;

	/* No card attached, or removal already recorded. */
	if (!host->card)
		return 1;
	if (mmc_card_removed(host->card))
		return 1;

	gone = host->bus_ops->alive(host);
	if (gone) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return gone;
}
/*
 * mmc_detect_card_removed() - check (with the host claimed) whether the
 * card has been removed.
 *
 * Returns non-zero when the card is absent or flagged as removed.  The
 * actual bus-level probe only runs when a change was signalled, the host
 * needs polling, or MMC_CAP2_DETECT_ON_ERR is set.
 */
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;

	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
/*
 * mmc_rescan() - delayed-work handler that (re)probes the slot.
 *
 * Detects removal of an existing removable card, and if no bus handler
 * remains, attempts a fresh card initialization at the host's minimum
 * frequency.  A wakelock is held/extended around events userspace may
 * need to react to (card removal or successful detection).
 *
 * Fix: the "bus_dead -> extend_wakelock" check (with its comment) was
 * duplicated verbatim; one copy removed.  Also use 'true' for the bool.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	bool extend_wakelock = false;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = true;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	if (!mmc_rescan_try_freq(host, host->f_min))
		extend_wakelock = true;
	mmc_release_host(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
	else
		wake_unlock(&host->detect_wake_lock);
	if (host->caps & MMC_CAP_NEEDS_POLL) {
		wake_lock(&host->detect_wake_lock);
		mmc_schedule_delayed_work(&host->detect, HZ);
	}
}
/*
 * mmc_start_host() - begin operating a newly added host: start from a
 * known powered-off state, then trigger an immediate rescan.
 */
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
/*
 * mmc_stop_host() - tear down a host: stop detection work, remove any
 * attached card, detach the bus and power off.
 */
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	/* Drop the wakelock only if detect work was actually pending. */
	if (cancel_delayed_work_sync(&host->detect))
		wake_unlock(&host->detect_wake_lock);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
/*
 * mmc_power_save_host() - put the card into power-save and cut host power.
 *
 * Requires a live bus whose bus_ops implement power_restore (so the card
 * can be brought back later); returns -EINVAL otherwise, else the result
 * of the optional power_save callback.
 */
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
/*
 * mmc_power_restore_host() - reverse of mmc_power_save_host(): power the
 * host back up and restore the card via bus_ops->power_restore.
 * Returns -EINVAL when the bus is dead or cannot restore power.
 */
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
int mmc_card_awake(struct mmc_host *host)
{
int err = -ENOSYS;
if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
return 0;
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
err = host->bus_ops->awake(host);
mmc_bus_put(host);
return err;
}
EXPORT_SYMBOL(mmc_card_awake);
int mmc_card_sleep(struct mmc_host *host)
{
int err = -ENOSYS;
if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
return 0;
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
/*
 * mmc_card_can_sleep() - 1 if the attached card supports the sleep
 * command (MMC cards with EXT_CSD revision >= 3), else 0.
 */
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (!card)
		return 0;
	if (!mmc_card_mmc(card))
		return 0;

	return card->ext_csd.rev >= 3 ? 1 : 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
/*
 * Flush the cache to the non-volatile storage.
 *
 * Only acts on MMC cards with an enabled on-card cache; issues the
 * EXT_CSD FLUSH_CACHE switch.  A timeout is recovered with HPI so the
 * card is not left stuck in programming state.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0, rc;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch_ignore_timeout(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1,
				MMC_FLUSH_REQ_TIMEOUT_MS);
		if (err == -ETIMEDOUT) {
			pr_debug("%s: cache flush timeout\n",
					mmc_hostname(card->host));
			/* Interrupt the stuck flush with HPI. */
			rc = mmc_interrupt_hpi(card);
			if (rc)
				pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
						mmc_hostname(host), rc);
		} else if (err) {
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 * This function should be called with host claimed
 *
 * Fix: the original read card->ext_csd.generic_cmd6_time in the
 * 'timeout' initializer before the 'if (card && ...)' check, giving a
 * NULL pointer dereference when no card is attached.  The read is now
 * performed only after 'card' is known to be non-NULL.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0, rc;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		timeout = card->ext_csd.generic_cmd6_time;
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			/* Disabling implies a flush, which can take longer. */
			if (!enable)
				timeout = MMC_FLUSH_REQ_TIMEOUT_MS;

			err = mmc_switch_ignore_timeout(card,
					EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);

			if (err == -ETIMEDOUT && !enable) {
				pr_debug("%s:cache disable operation timeout\n",
						mmc_hostname(card->host));
				/* Interrupt the stuck operation with HPI. */
				rc = mmc_interrupt_hpi(card);
				if (rc)
					pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
							mmc_hostname(host), rc);
			} else if (err) {
				pr_err("%s: cache %s error %d\n",
						mmc_hostname(card->host),
						enable ? "on" : "off",
						err);
			} else {
				card->ext_csd.cache_ctrl = enable;
			}
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
#ifdef CONFIG_PM
/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 *
 * Stops BKOPS, pre-claims the host (except for SDIO, where that can
 * deadlock -- see below) and calls the bus suspend handler.  Buses that
 * cannot resume are removed outright and will be redetected on resume.
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (mmc_bus_needs_resume(host))
		return 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/*
		 * A long response time is not acceptable for device drivers
		 * when doing suspend. Prevent mmc_claim_host in the suspend
		 * sequence, to potentially wait "forever" by trying to
		 * pre-claim the host.
		 *
		 * Skip try claim host for SDIO cards, doing so fixes deadlock
		 * conditions. The function driver suspend may again call into
		 * SDIO driver within a different context for enabling power
		 * save mode in the card and hence wait in mmc_claim_host
		 * causing deadlock.
		 */
		if (!(host->card && mmc_card_sdio(host->card)))
			if (!mmc_try_claim_host(host))
				err = -EBUSY;

		if (!err) {
			if (host->bus_ops->suspend) {
				err = mmc_stop_bkops(host->card);
				if (err)
					goto stop_bkops_err;
				err = host->bus_ops->suspend(host);
			}
			if (!(host->card && mmc_card_sdio(host->card)))
				mmc_release_host(host);

			if (err == -ENOSYS || !host->bus_ops->resume) {
				/*
				 * We simply "remove" the card in this case.
				 * It will be redetected on resume. (Calling
				 * bus_ops->remove() with a claimed host can
				 * deadlock.)
				 */
				if (host->bus_ops->remove)
					host->bus_ops->remove(host);
				mmc_claim_host(host);
				mmc_detach_bus(host);
				mmc_power_off(host);
				mmc_release_host(host);
				host->pm_flags = 0;
				err = 0;
			}
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

	return err;

stop_bkops_err:
	/* Undo the pre-claim taken above (non-SDIO only). */
	if (!(host->card && mmc_card_sdio(host->card)))
		mmc_release_host(host);
	return err;
}
EXPORT_SYMBOL(mmc_suspend_host);
/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 *
 * For hosts using manual (deferred) resume, only sets the
 * NEEDS_RESUME flag.  Otherwise re-powers the card if its power was
 * not kept and calls the bus resume handler; a resume failure is
 * logged but treated as a (possibly removed) card, not an error.
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (mmc_bus_manual_resume(host)) {
		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
		mmc_bus_put(host);
		return 0;
	}

	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			/* NOTE(review): host->card is assumed non-NULL here
			 * when bus_ops is set -- confirm against bus attach. */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
/* Do the card removal on suspend if card is assumed removeable
 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
   to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
					unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* Stop background operations before the system goes down. */
		if (host->card && mmc_card_mmc(host->card)) {
			mmc_claim_host(host);
			err = mmc_stop_bkops(host->card);
			mmc_release_host(host);
			if (err) {
				pr_err("%s: didn't stop bkops\n",
					mmc_hostname(host));
				return err;
			}
		}

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_needs_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		if (cancel_delayed_work_sync(&host->detect))
			wake_unlock(&host->detect_wake_lock);

		/* Buses that can suspend keep their card across suspend. */
		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_manual_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		/* Re-enable rescans and probe for card changes. */
		mmc_detect_change(host, 0);

	}

	return 0;
}
#endif
#ifdef CONFIG_MMC_EMBEDDED_SDIO
/*
 * mmc_set_embedded_sdio_data() - register static CIS/CCCR/function
 * descriptors for an embedded (non-discoverable) SDIO device, so the
 * SDIO core can use them instead of reading them from the card.
 */
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
/*
 * mmc_init() - subsystem init: create the ordered "kmmcd" workqueue and
 * register the MMC bus, host class and SDIO bus.  Unwinds in reverse
 * order on failure.
 */
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
/* mmc_exit() - subsystem teardown, exact reverse of mmc_init(). */
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}
subsys_initcall(mmc_init);
module_exit(mmc_exit);
MODULE_LICENSE("GPL");
| VilleEvitaCake/android_kernel_htc_msm8960 | drivers/mmc/core/core.c | C | gpl-2.0 | 80,989 |
/*
* linux/arch/arm/kernel/traps.c
*
* Copyright (C) 1995-2009 Russell King
* Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 'traps.c' handles hardware exceptions after we have saved some state in
* 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
* kill the offending process.
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include "ptrace.h"
#include "signal.h"
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
void *vectors_page;
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;
/* Parse the "user_debug=" kernel command-line option into user_debug. */
static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
/*
 * Print one backtrace frame ("where" called from "from"), with symbol
 * names when KALLSYMS is available.  For frames inside exception entry
 * code, also dump the saved pt_regs that sit just above the frame.
 */
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
	sprint_symbol(sym1, where);
	sprint_symbol(sym2, from);
	printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

	if (in_exception_text(where))
		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernels view of
 * physical memory. If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET)
		return -EFAULT;

	if (high_memory != NULL && sp > (unsigned long)high_memory)
		return -EFAULT;

	return 0;
}
#endif
/*
 * Dump out the contents of some memory nicely...
 *
 * Prints [bottom, top) as rows of eight 32-bit words.  Temporarily
 * switches to KERNEL_DS so __get_user() can read possibly-faulting
 * addresses; unreadable words print as "????????".
 *
 * Fix: the line buffer was named 'str', shadowing the 'str' label
 * parameter (-Wshadow); renamed to 'line'.
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char line[sizeof(" 12345678") * 8 + 1];

		memset(line, ' ', sizeof(line));
		line[sizeof(line) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(line + i * 9, " %08lx", val);
				else
					sprintf(line + i * 9, " ????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, line);
	}

	set_fs(fs);
}
/*
 * Print the instruction stream around the faulting PC: the four
 * instructions before it plus the faulting one (parenthesised).
 * Thumb mode prints 16-bit units, ARM mode 32-bit.
 */
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = __get_user(val, &((u16 *)addr)[i]);
		else
			bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
					width, val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);

	set_fs(fs);
}
#ifdef CONFIG_ARM_UNWIND
/* With the ARM unwinder configured, backtraces use the unwind tables. */
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unwind_backtrace(regs, tsk);
}
#else
/*
 * Frame-pointer based backtrace.  Picks the starting frame pointer from
 * @regs, a sleeping task's saved context, or the current fp register,
 * validates it, then walks the frames via c_backtrace().
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp, mode;
	int ok = 1;

	printk("Backtrace: ");

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = regs->ARM_fp;
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;	/* user mode value; no regs available */
	} else {
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, mode);
}
#endif
/* Backtrace the current context (arch implementation of dump_stack()). */
void dump_stack(void)
{
	dump_backtrace(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
/* Backtrace an arbitrary task; @sp is unused on ARM. */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();	/* keep the backtrace ordered w.r.t. surrounding output */
}
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
/*
 * Print the oops banner, registers, and -- for kernel-mode faults --
 * the stack, backtrace and code bytes.  Called under die_lock.
 */
static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;

#if defined(CONFIG_MACH_STAR)
	set_default_loglevel(); /* 20100916 set default loglevel */
#endif

	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
	       str, err, ++die_counter);
	sysfs_printk_last_file();
	print_modules();
	__show_regs(regs);
	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}
}
/* Serializes oops output from concurrent CPUs. */
DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 *
 * Dumps the oops, taints the kernel, then either panics (fatal context)
 * or kills the offending task with SIGSEGV.  Never returns.
 */
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	__die(str, err, thread, regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(SIGSEGV);
}
/*
 * Route a fault to its consequence: user-mode faults deliver the signal
 * described by @info to the current task (recording @err/@trap in its
 * thread struct); kernel-mode faults oops via die().
 */
void arm_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap)
{
	if (!user_mode(regs)) {
		die(str, regs, err);
		return;
	}

	current->thread.error_code = err;
	current->thread.trap_no = trap;
	force_sig_info(info->si_signo, info, current);
}
/* List of handlers for specific undefined-instruction encodings. */
static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);

/* Add @hook to the undefined-instruction hook list. */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	spin_unlock_irqrestore(&undef_lock, flags);
}

/* Remove @hook from the undefined-instruction hook list. */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	spin_unlock_irqrestore(&undef_lock, flags);
}
/*
 * Find a registered hook matching @instr and the current CPSR, and run
 * it (outside the lock).  Returns the hook's result, or 1 when no hook
 * matched / the hook declined -- i.e. the instruction is unhandled.
 */
static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
	struct undef_hook *hook;
	unsigned long flags;
	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

	spin_lock_irqsave(&undef_lock, flags);
	/* Last matching hook wins. */
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
			fn = hook->fn;
	spin_unlock_irqrestore(&undef_lock, flags);

	return fn ? fn(regs, instr) : 1;
}
/*
 * Undefined-instruction exception handler.  Fetches the offending
 * instruction, offers it to the registered hooks (FPU emulation, TLS
 * emulation, ...), and raises SIGILL if nobody handles it.
 */
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	unsigned int correction = thumb_mode(regs) ? 2 : 4;
	unsigned int instr;
	siginfo_t info;
	void __user *pc;

	/*
	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
	 * depending whether we're in Thumb mode or not.
	 * Correct this offset.
	 */
	regs->ARM_pc -= correction;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
		instr = *(u32 *) pc;
	} else if (thumb_mode(regs)) {
		/* NOTE(review): get_user result unchecked; a fault would
		 * leave 'instr' stale -- confirm against upstream. */
		get_user(instr, (u16 __user *)pc);
	} else {
		get_user(instr, (u32 __user *)pc);
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		dump_instr(KERN_INFO, regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = pc;

	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
/* Spurious FIQ handler: log it and carry on (likely a hardware issue). */
asmlinkage void do_unexp_fiq (struct pt_regs *regs)
{
	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
	printk("You may have a hardware problem...\n");
}
/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	/* die() does not return; the lines below are belt-and-braces. */
	local_irq_disable();
	panic("bad mode");
}
/*
 * Handle an obsolete/unknown syscall number.  Non-Linux personalities
 * with an exec-domain handler get the call forwarded; everyone else
 * receives SIGILL at the syscall instruction.
 */
static int bad_syscall(int n, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if (current->personality != PER_LINUX &&
	    current->personality != PER_LINUX_32BIT &&
	    thread->exec_domain->handler) {
		thread->exec_domain->handler(n, regs);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
			task_pid_nr(current), current->comm, n);
		dump_instr(KERN_ERR, regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLTRP;
	/* Point at the SWI instruction itself (PC is past it). */
	info.si_addr  = (void __user *)instruction_pointer(regs) -
			  (thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

	return regs->ARM_r0;
}
/*
 * Flush the user cache range [start, end) for the cacheflush syscall.
 * The range is clipped to the single VMA containing @start; @flags must
 * be zero (reserved).
 */
static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;

	if (end < start || flags)
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			start = vma->vm_start;
		if (end > vma->vm_end)
			end = vma->vm_end;

		/* Drop mmap_sem before the (potentially slow) flush. */
		up_read(&mm->mmap_sem);
		flush_cache_user_range(start, end);
		return;
	}
	up_read(&mm->mmap_sem);
}
/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	/* Anything outside the private 0x9fxxxx range is just a bad call. */
	if ((no >> 16) != (__ARM_NR_BASE>> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code  = SEGV_MAPERR;
		info.si_addr  = NULL;

		arm_notify_die("branch through zero", regs, &info, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		/* Rewind PC to the breakpoint SWI itself for the debugger. */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(current, regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
		return 0;

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		thread->tp_value = regs->ARM_r0;
#if defined(CONFIG_HAS_TLS_REG)
#if defined(CONFIG_TEGRA_ERRATA_657451)
		/* Errata workaround: fold bit 20 into bit 0 (bit 0 must be clear). */
		BUG_ON(regs->ARM_r0 & 0x1);
		asm ("mcr p15, 0, %0, c13, c0, 3" : :
		     "r" ((regs->ARM_r0) | ((regs->ARM_r0>>20) & 0x1)));
#else
		asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
#endif
#elif !defined(CONFIG_TLS_REG_EMUL)
		/*
		 * User space must never try to access this directly.
		 * Expect your app to break eventually if you do so.
		 * The user helper at 0xffff0fe0 must be used instead.
		 * (see entry-armv.S for details)
		 */
		*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
#endif
		return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
	/*
	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
	 * happened.  Also set the user C flag accordingly.
	 * If access permissions have to be fixed up then non-zero is
	 * returned and the operation has to be re-attempted.
	 *
	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
	 * existence.  Don't ever use this from user code.
	 */
	case NR(cmpxchg):
	for (;;) {
		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
					 struct pt_regs *regs);
		unsigned long val;
		unsigned long addr = regs->ARM_r2;
		struct mm_struct *mm = current->mm;
		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
		spinlock_t *ptl;

		regs->ARM_cpsr &= ~PSR_C_BIT;
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, addr);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		/* Require a present, writable (dirty) mapping. */
		if (!pte_present(*pte) || !pte_dirty(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}
		val = *(unsigned long *)addr;
		val -= regs->ARM_r0;
		if (val == 0) {
			*(unsigned long *)addr = regs->ARM_r1;
			regs->ARM_cpsr |= PSR_C_BIT;
		}
		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return val;

		bad_access:
		up_read(&mm->mmap_sem);
		/* simulate a write access fault */
		do_DataAbort(addr, 15 + (1 << 11), regs);
	}
#endif

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL.  This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		printk("[%d] %s: arm syscall %d\n",
		       task_pid_nr(current), current->comm, no);
		dump_instr("", regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(regs->ARM_fp, processor_mode(regs));
		}
	}
#endif
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLTRP;
	/* Point at the SWI instruction itself (PC is past it). */
	info.si_addr  = (void __user *)instruction_pointer(regs) -
			  (thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
	return 0;
}
#ifdef CONFIG_TLS_REG_EMUL
/*
* We might be running on an ARMv6+ processor which should have the TLS
* register but for some reason we can't use it, or maybe an SMP system
* using a pre-ARMv6 processor (there are apparently a few prototypes like
* that in existence) and therefore access to that register must be
* emulated.
*/
/*
 * Emulate the read-TLS mrc instruction matched by arm_mrc_hook below:
 * load the current thread's TLS value into the destination register
 * and step past the instruction.  Returns 0 when emulated, non-zero
 * to fall back to normal undefined-instruction handling.
 */
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int rd = (instr >> 12) & 15;

	/* Rd == PC: refuse to emulate, let the undef handler decide. */
	if (rd == 15)
		return 1;

	regs->uregs[rd] = current_thread_info()->tp_value;
	regs->ARM_pc += 4;	/* skip the emulated 4-byte ARM instruction */
	return 0;
}
/*
 * Hook for ARM-state (PSR_T_BIT clear) instructions matching encoding
 * 0x0e1d0f70 with the Rd field (bits 15:12) masked out; such traps are
 * emulated by get_tp_trap() above.
 */
static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};
/* Register the TLS-read emulation hook; run late in boot. */
static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);
#endif
/*
 * Called when an exchange primitive is used with an unsupported data
 * size.  Reports the calling pc, pointer and size, then BUG()s — there
 * is no way to carry out an exchange of unknown width.
 */
void __bad_xchg(volatile void *ptr, int size)
{
	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
		__builtin_return_address(0), ptr, size);
	BUG();
}
EXPORT_SYMBOL(__bad_xchg);
/*
* A data abort trap was taken, but we did not handle the instruction.
* Try to abort the user program, or panic if it was the kernel.
*/
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

#ifdef CONFIG_DEBUG_USER
	/* Optionally dump the faulting instruction and page-table state. */
	if (user_debug & UDBG_BADABORT) {
		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
			task_pid_nr(current), current->comm, code, instr);
		dump_instr(KERN_ERR, regs);
		show_pte(current->mm, addr);
	}
#endif

	/* Deliver SIGILL pointing at the aborting instruction. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)addr;

	arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}
/*
 * BUG() back-end: report the source location, then force a fault by
 * writing through a NULL pointer so the kernel oopses at this point.
 */
void __attribute__((noreturn)) __bug(const char *file, int line)
{
	printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line);
	/* Deliberate NULL write: converts the BUG into a hard fault. */
	*(int *)0 = 0;

	/* Avoid "noreturn function does return" */
	for (;;);
}
EXPORT_SYMBOL(__bug);
/* Stub called when a required read/write handler is missing. */
void __readwrite_bug(const char *fn)
{
	printk("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
/* Report a corrupt page-table entry found at file:line. */
void __pte_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pte %08lx.\n", file, line, val);
}
/* Report a corrupt page-middle-directory entry found at file:line. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
}
/* Report a corrupt page-global-directory entry found at file:line. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
}
/* Called from the software division helpers on divide-by-zero. */
asmlinkage void __div0(void)
{
	printk("Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);
/* Kernel-side abort(): die via BUG(), and panic if that returns. */
void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
/* Nothing to do here: vector setup is performed by early_trap_init(). */
void __init trap_init(void)
{
	return;
}
/*
 * Populate the exception vector page and make it visible to the CPU.
 * The page layout is: vectors at offset 0, stubs at 0x200, kuser
 * helpers packed at the top (0x1000 - kuser_sz), plus the in-kernel
 * signal-return and syscall-restart code at their fixed addresses.
 */
void __init early_trap_init(void)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
	unsigned long vectors = CONFIG_VECTORS_BASE;
#else
	unsigned long vectors = (unsigned long)vectors_page;
#endif
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
	       sigreturn_codes, sizeof(sigreturn_codes));
	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
	       syscall_restart_code, sizeof(syscall_restart_code));

	/* Make the new vector contents visible to instruction fetch. */
	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
| Mazout360/lge-kernel-gb | arch/arm/kernel/traps.c | C | gpl-2.0 | 19,685 |
/* BGP-4, BGP-4+ daemon program
Copyright (C) 1996, 97, 98, 99, 2000 Kunihiro Ishiguro
This file is part of GNU Kroute.
GNU Kroute is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
GNU Kroute is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Kroute; see the file COPYING. If not, write to the Free
Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA. */
#include <kroute.h>
#include "prefix.h"
#include "thread.h"
#include "buffer.h"
#include "stream.h"
#include "command.h"
#include "sockunion.h"
#include "network.h"
#include "memory.h"
#include "filter.h"
#include "routemap.h"
#include "str.h"
#include "log.h"
#include "plist.h"
#include "linklist.h"
#include "workqueue.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_dump.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_community.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_regex.h"
#include "bgpd/bgp_clist.h"
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_packet.h"
#include "bgpd/bgp_kroute.h"
#include "bgpd/bgp_open.h"
#include "bgpd/bgp_filter.h"
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_damp.h"
#include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_advertise.h"
#include "bgpd/bgp_network.h"
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_mpath.h"
#ifdef HAVE_SNMP
#include "bgpd/bgp_snmp.h"
#endif /* HAVE_SNMP */
/* BGP process wide configuration. */
static struct bgp_master bgp_master;
extern struct in_addr router_id_kroute;
/* BGP process wide configuration pointer to export. */
struct bgp_master *bm;
/* BGP community-list. */
struct community_list_handler *bgp_clist;
/* BGP global flag manipulation. */
/* Set a process-wide BGP option flag.  Only the three options checked
   below may be set; anything else yields BGP_ERR_INVALID_FLAG. */
int
bgp_option_set (int flag)
{
  if (flag != BGP_OPT_NO_FIB
      && flag != BGP_OPT_MULTIPLE_INSTANCE
      && flag != BGP_OPT_CONFIG_CISCO)
    return BGP_ERR_INVALID_FLAG;

  SET_FLAG (bm->options, flag);
  return 0;
}
/* Clear a process-wide BGP option flag.  Multiple-instance mode may
   only be turned off while at most one BGP instance exists. */
int
bgp_option_unset (int flag)
{
  if (flag == BGP_OPT_MULTIPLE_INSTANCE)
    {
      if (listcount (bm->bgp) > 1)
	return BGP_ERR_MULTIPLE_INSTANCE_USED;
    }
  else if (flag != BGP_OPT_NO_FIB && flag != BGP_OPT_CONFIG_CISCO)
    return BGP_ERR_INVALID_FLAG;

  UNSET_FLAG (bm->options, flag);
  return 0;
}
/* Return non-zero if the given process-wide option flag is set. */
int
bgp_option_check (int flag)
{
  return CHECK_FLAG (bm->options, flag);
}
/* BGP flag manipulation. */
/* Set a per-instance flag on the given BGP structure; always 0. */
int
bgp_flag_set (struct bgp *bgp, int flag)
{
  SET_FLAG (bgp->flags, flag);
  return 0;
}
/* Clear a per-instance flag on the given BGP structure; always 0. */
int
bgp_flag_unset (struct bgp *bgp, int flag)
{
  UNSET_FLAG (bgp->flags, flag);
  return 0;
}
/* Return non-zero if the per-instance flag is set on this BGP. */
int
bgp_flag_check (struct bgp *bgp, int flag)
{
  return CHECK_FLAG (bgp->flags, flag);
}
/* Internal function to set BGP structure configuration flag.  */
/* Mark a configuration item as explicitly configured on this BGP. */
static void
bgp_config_set (struct bgp *bgp, int config)
{
  SET_FLAG (bgp->config, config);
}
/* Mark a configuration item as no longer explicitly configured. */
static void
bgp_config_unset (struct bgp *bgp, int config)
{
  UNSET_FLAG (bgp->config, config);
}
/* Return non-zero if the configuration item is explicitly set. */
static int
bgp_config_check (struct bgp *bgp, int config)
{
  return CHECK_FLAG (bgp->config, config);
}
/* Set BGP router identifier. */
/* Configure the router identifier, propagate it to every peer's
   local_id, and reset Established sessions so the change takes
   effect.  Returns 0 (no-op when the id is already configured). */
int
bgp_router_id_set (struct bgp *bgp, struct in_addr *id)
{
  struct peer *peer;
  struct listnode *node, *nnode;

  /* Nothing to do if the same router-id is already configured. */
  if (bgp_config_check (bgp, BGP_CONFIG_ROUTER_ID)
      && IPV4_ADDR_SAME (&bgp->router_id, id))
    return 0;

  IPV4_ADDR_COPY (&bgp->router_id, id);
  bgp_config_set (bgp, BGP_CONFIG_ROUTER_ID);

  /* Set all peer's local identifier with this value. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      IPV4_ADDR_COPY (&peer->local_id, id);

      /* Established sessions must renegotiate with the new id. */
      if (peer->status == Established)
	{
	  peer->last_reset = PEER_DOWN_RID_CHANGE;
	  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
			   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
	}
    }
  return 0;
}
/* BGP's cluster-id control. */
/* Configure the route-reflector cluster-id and reset every
   Established IBGP session so it is re-advertised.  Returns 0. */
int
bgp_cluster_id_set (struct bgp *bgp, struct in_addr *cluster_id)
{
  struct peer *peer;
  struct listnode *node, *nnode;

  /* No-op when the same cluster-id is already configured. */
  if (bgp_config_check (bgp, BGP_CONFIG_CLUSTER_ID)
      && IPV4_ADDR_SAME (&bgp->cluster_id, cluster_id))
    return 0;

  IPV4_ADDR_COPY (&bgp->cluster_id, cluster_id);
  bgp_config_set (bgp, BGP_CONFIG_CLUSTER_ID);

  /* Clear all IBGP peer. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      if (peer_sort (peer) != BGP_PEER_IBGP)
	continue;

      if (peer->status == Established)
	{
	  peer->last_reset = PEER_DOWN_CLID_CHANGE;
	  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
			   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
	}
    }
  return 0;
}
/* Remove the configured cluster-id and reset every Established IBGP
   session.  Returns 0 (no-op when no cluster-id was configured). */
int
bgp_cluster_id_unset (struct bgp *bgp)
{
  struct peer *peer;
  struct listnode *node, *nnode;

  if (! bgp_config_check (bgp, BGP_CONFIG_CLUSTER_ID))
    return 0;

  bgp->cluster_id.s_addr = 0;
  bgp_config_unset (bgp, BGP_CONFIG_CLUSTER_ID);

  /* Clear all IBGP peer. */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      if (peer_sort (peer) != BGP_PEER_IBGP)
	continue;

      if (peer->status == Established)
	{
	  peer->last_reset = PEER_DOWN_CLID_CHANGE;
	  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
			   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
	}
    }
  return 0;
}
/* time_t value that is monotonically increasing
 * and unaffected by adjustments to the system clock
 */
/* Return monotonic time in seconds (immune to wall-clock steps). */
time_t bgp_clock (void)
{
  struct timeval tv;

  bane_gettime(BANE_CLK_MONOTONIC, &tv);
  return tv.tv_sec;
}
/* BGP timer configuration. */
/* Configure the default keepalive and holdtime.  The effective
   keepalive is capped at one third of the holdtime. */
int
bgp_timers_set (struct bgp *bgp, u_int32_t keepalive, u_int32_t holdtime)
{
  u_int32_t keepalive_cap = holdtime / 3;

  bgp->default_keepalive =
    (keepalive > keepalive_cap) ? keepalive_cap : keepalive;
  bgp->default_holdtime = holdtime;

  return 0;
}
/* Restore the compile-time default keepalive and holdtime. */
int
bgp_timers_unset (struct bgp *bgp)
{
  bgp->default_keepalive = BGP_DEFAULT_KEEPALIVE;
  bgp->default_holdtime = BGP_DEFAULT_HOLDTIME;

  return 0;
}
/* BGP confederation configuration. */
/* Configure the confederation identifier (external AS) and reset the
   sessions whose local AS changes as a result.  Returns
   BGP_ERR_INVALID_AS for AS 0, otherwise 0. */
int
bgp_confederation_id_set (struct bgp *bgp, as_t as)
{
  struct peer *peer;
  struct listnode *node, *nnode;
  int already_confed;

  if (as == 0)
    return BGP_ERR_INVALID_AS;

  /* Remember - were we doing confederation before? */
  already_confed = bgp_config_check (bgp, BGP_CONFIG_CONFEDERATION);
  bgp->confed_id = as;
  bgp_config_set (bgp, BGP_CONFIG_CONFEDERATION);

  /* If we were doing confederation already, this is just an external
     AS change.  Just Reset EBGP sessions, not CONFED sessions.  If we
     were not doing confederation before, reset all EBGP sessions.  */
  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      /* We're looking for peers whose AS is not local or part of our
	 confederation.  */
      if (already_confed)
	{
	  if (peer_sort (peer) == BGP_PEER_EBGP)
	    {
	      peer->local_as = as;
	      if (peer->status == Established)
		{
		  peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE;
		  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
				   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
		}
	      else
		BGP_EVENT_ADD (peer, BGP_Stop);
	    }
	}
      else
	{
	  /* Not doing confederation before, so reset every non-local
	     session */
	  if (peer_sort (peer) != BGP_PEER_IBGP)
	    {
	      /* Reset the local_as to be our EBGP one */
	      if (peer_sort (peer) == BGP_PEER_EBGP)
		peer->local_as = as;
	      if (peer->status == Established)
		{
		  peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE;
		  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
				   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
		}
	      else
		BGP_EVENT_ADD (peer, BGP_Stop);
	    }
	}
    }
  return 0;
}
/* Remove the confederation identifier: restore each non-IBGP peer's
   local AS to the instance AS and reset its session.  Returns 0. */
int
bgp_confederation_id_unset (struct bgp *bgp)
{
  struct peer *peer;
  struct listnode *node, *nnode;

  bgp->confed_id = 0;
  bgp_config_unset (bgp, BGP_CONFIG_CONFEDERATION);

  for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
    {
      /* We're looking for peers whose AS is not local */
      if (peer_sort (peer) != BGP_PEER_IBGP)
	{
	  peer->local_as = bgp->as;
	  if (peer->status == Established)
	    {
	      peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE;
	      bgp_notify_send (peer, BGP_NOTIFY_CEASE,
			       BGP_NOTIFY_CEASE_CONFIG_CHANGE);
	    }
	  else
	    BGP_EVENT_ADD (peer, BGP_Stop);
	}
    }
  return 0;
}
/* Is an AS part of the confed or not? */
/* Return 1 when AS appears in this instance's configured confederation
   peer set, 0 otherwise (including when bgp is NULL). */
int
bgp_confederation_peers_check (struct bgp *bgp, as_t as)
{
  int i;

  if (bgp == NULL)
    return 0;

  for (i = 0; i < bgp->confed_peers_cnt; i++)
    {
      if (bgp->confed_peers[i] == as)
	return 1;
    }

  return 0;
}
/* Add an AS to the confederation set. */
/* Add an AS to the confederation peer set, growing the array by one,
   and reset sessions to peers in that AS when confederation is active.
   Returns 0 on success, -1 if the AS is already present, or a
   BGP_ERR_* code for invalid arguments. */
int
bgp_confederation_peers_add (struct bgp *bgp, as_t as)
{
  struct peer *peer;
  struct listnode *node, *nnode;

  if (! bgp)
    return BGP_ERR_INVALID_BGP;

  /* The instance's own AS cannot be a confederation peer. */
  if (bgp->as == as)
    return BGP_ERR_INVALID_AS;

  if (bgp_confederation_peers_check (bgp, as))
    return -1;

  /* Grow (or create) the confed peer array by one slot. */
  if (bgp->confed_peers)
    bgp->confed_peers = XREALLOC (MTYPE_BGP_CONFED_LIST,
				  bgp->confed_peers,
				  (bgp->confed_peers_cnt + 1) * sizeof (as_t));
  else
    bgp->confed_peers = XMALLOC (MTYPE_BGP_CONFED_LIST,
				 (bgp->confed_peers_cnt + 1) * sizeof (as_t));

  bgp->confed_peers[bgp->confed_peers_cnt] = as;
  bgp->confed_peers_cnt++;

  if (bgp_config_check (bgp, BGP_CONFIG_CONFEDERATION))
    {
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
	{
	  if (peer->as == as)
	    {
	      /* Peer is now inside the confederation: talk with our
		 real AS rather than the confed id. */
	      peer->local_as = bgp->as;
	      if (peer->status == Established)
		{
		  peer->last_reset = PEER_DOWN_CONFED_PEER_CHANGE;
		  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
				   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
		}
	      else
		BGP_EVENT_ADD (peer, BGP_Stop);
	    }
	}
    }
  return 0;
}
/* Delete an AS from the confederation set. */
/* Delete an AS from the confederation peer set, shrinking the array,
   and reset sessions to peers in that AS when confederation is active.
   Returns 0 on success, -1 when bgp is NULL or the AS is absent. */
int
bgp_confederation_peers_remove (struct bgp *bgp, as_t as)
{
  int i;
  int j;
  struct peer *peer;
  struct listnode *node, *nnode;

  if (! bgp)
    return -1;

  if (! bgp_confederation_peers_check (bgp, as))
    return -1;

  /* Shift the entries after the matching AS down by one slot. */
  for (i = 0; i < bgp->confed_peers_cnt; i++)
    if (bgp->confed_peers[i] == as)
      for(j = i + 1; j < bgp->confed_peers_cnt; j++)
	bgp->confed_peers[j - 1] = bgp->confed_peers[j];

  bgp->confed_peers_cnt--;

  if (bgp->confed_peers_cnt == 0)
    {
      if (bgp->confed_peers)
	XFREE (MTYPE_BGP_CONFED_LIST, bgp->confed_peers);
      bgp->confed_peers = NULL;
    }
  else
    bgp->confed_peers = XREALLOC (MTYPE_BGP_CONFED_LIST,
				  bgp->confed_peers,
				  bgp->confed_peers_cnt * sizeof (as_t));

  /* Now reset any peer whose remote AS has just been removed from the
     CONFED */
  if (bgp_config_check (bgp, BGP_CONFIG_CONFEDERATION))
    {
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
	{
	  if (peer->as == as)
	    {
	      /* Peer left the confederation: speak with the confed id. */
	      peer->local_as = bgp->confed_id;
	      if (peer->status == Established)
		{
		  peer->last_reset = PEER_DOWN_CONFED_PEER_CHANGE;
		  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
				   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
		}
	      else
		BGP_EVENT_ADD (peer, BGP_Stop);
	    }
	}
    }

  return 0;
}
/* Local preference configuration. */
/* Set the instance default LOCAL_PREF; -1 when bgp is NULL, else 0. */
int
bgp_default_local_preference_set (struct bgp *bgp, u_int32_t local_pref)
{
  if (! bgp)
    return -1;

  bgp->default_local_pref = local_pref;

  return 0;
}
/* Restore the compile-time default LOCAL_PREF; -1 when bgp is NULL. */
int
bgp_default_local_preference_unset (struct bgp *bgp)
{
  if (! bgp)
    return -1;

  bgp->default_local_pref = BGP_DEFAULT_LOCAL_PREF;

  return 0;
}
/* If peer is RSERVER_CLIENT in at least one address family and is not a member
   of a peer_group for that family, return 1.
   Used to check whether the peer is included in list bgp->rsclient. */
/* Return 1 when the peer is flagged RSERVER_CLIENT in at least one
   address family where it is not covered by a peer-group; used to
   decide membership of the bgp->rsclient list. */
int
peer_rsclient_active (struct peer *peer)
{
  int afi, safi;

  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
      {
	if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_RSERVER_CLIENT)
	    && ! peer->af_group[afi][safi])
	  return 1;
      }

  return 0;
}
/* Peer comparison function for sorting. */
/* Ordering for the sorted peer list: compare by session address. */
static int
peer_cmp (struct peer *p1, struct peer *p2)
{
  return sockunion_cmp (&p1->su, &p2->su);
}
/* Return non-zero if the per-AF flag is set for this peer. */
int
peer_af_flag_check (struct peer *peer, afi_t afi, safi_t safi, u_int32_t flag)
{
  return CHECK_FLAG (peer->af_flags[afi][safi], flag);
}
/* Reset all address family specific configuration. */
/* Reset all address-family specific configuration for one (afi,safi):
   frees filter/route-map/prefix-list names, clears flags, sflags,
   capabilities and ORF state, restores default send-community, and
   resets maximum-prefix settings. */
static void
peer_af_flag_reset (struct peer *peer, afi_t afi, safi_t safi)
{
  int i;
  struct bgp_filter *filter;
  char orf_name[BUFSIZ];

  filter = &peer->filter[afi][safi];

  /* Clear neighbor filter and route-map */
  for (i = FILTER_IN; i < FILTER_MAX; i++)
    {
      if (filter->dlist[i].name)
	{
	  free (filter->dlist[i].name);
	  filter->dlist[i].name = NULL;
	}
      if (filter->plist[i].name)
	{
	  free (filter->plist[i].name);
	  filter->plist[i].name = NULL;
	}
      if (filter->aslist[i].name)
	{
	  free (filter->aslist[i].name);
	  filter->aslist[i].name = NULL;
	}
    }
  for (i = RMAP_IN; i < RMAP_MAX; i++)
    {
      if (filter->map[i].name)
	{
	  free (filter->map[i].name);
	  filter->map[i].name = NULL;
	}
    }

  /* Clear unsuppress map. */
  if (filter->usmap.name)
    free (filter->usmap.name);
  filter->usmap.name = NULL;
  filter->usmap.map = NULL;

  /* Clear neighbor's all address family flags. */
  peer->af_flags[afi][safi] = 0;

  /* Clear neighbor's all address family sflags. */
  peer->af_sflags[afi][safi] = 0;

  /* Clear neighbor's all address family capabilities. */
  peer->af_cap[afi][safi] = 0;

  /* Clear ORF info */
  peer->orf_plist[afi][safi] = NULL;

  /* Use a bounded write: peer->host originates from user configuration
     and an unbounded sprintf could overrun orf_name. */
  snprintf (orf_name, sizeof (orf_name), "%s.%d.%d", peer->host, afi, safi);
  prefix_bgp_orf_remove_all (orf_name);

  /* Set default neighbor send-community. */
  if (! bgp_option_check (BGP_OPT_CONFIG_CISCO))
    {
      SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY);
      SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY);
    }

  /* Clear neighbor default_originate_rmap */
  if (peer->default_rmap[afi][safi].name)
    free (peer->default_rmap[afi][safi].name);
  peer->default_rmap[afi][safi].name = NULL;
  peer->default_rmap[afi][safi].map = NULL;

  /* Clear neighbor maximum-prefix */
  peer->pmax[afi][safi] = 0;
  peer->pmax_threshold[afi][safi] = MAXIMUM_PREFIX_THRESHOLD_DEFAULT;
}
/* peer global config reset */
/* Reset a peer's global (non-address-family) configuration to the
   defaults appropriate for its current IBGP/EBGP type. */
static void
peer_global_config_reset (struct peer *peer)
{
  peer->weight = 0;
  peer->change_local_as = 0;
  /* IBGP sessions default to TTL 255, EBGP to directly-connected TTL 1. */
  peer->ttl = (peer_sort (peer) == BGP_PEER_IBGP ? 255 : 1);

  if (peer->update_source)
    {
      sockunion_free (peer->update_source);
      peer->update_source = NULL;
    }
  if (peer->update_if)
    {
      XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
      peer->update_if = NULL;
    }

  if (peer_sort (peer) == BGP_PEER_IBGP)
    peer->v_routeadv = BGP_DEFAULT_IBGP_ROUTEADV;
  else
    peer->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;

  peer->flags = 0;
  peer->config = 0;
  peer->holdtime = 0;
  peer->keepalive = 0;
  peer->connect = 0;
  peer->v_connect = BGP_DEFAULT_CONNECT_RETRY;
}
/* Check peer's AS number and determine whether this peer is IBGP or EBGP */
/* Classify a peer as BGP_PEER_IBGP / EBGP / CONFED / INTERNAL based on
   its AS numbers and the instance's confederation configuration. */
int
peer_sort (struct peer *peer)
{
  struct bgp *bgp;

  bgp = peer->bgp;

  /* Peer-group */
  if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->as)
	return (bgp->as == peer->as ? BGP_PEER_IBGP : BGP_PEER_EBGP);
      else
	{
	  /* Group itself has no AS: infer the type from any member. */
	  struct peer *peer1;
	  peer1 = listnode_head (peer->group->peer);
	  if (peer1)
	    return (peer1->local_as == peer1->as
		    ? BGP_PEER_IBGP : BGP_PEER_EBGP);
	}
      return BGP_PEER_INTERNAL;
    }

  /* Normal peer */
  if (bgp && CHECK_FLAG (bgp->config, BGP_CONFIG_CONFEDERATION))
    {
      if (peer->local_as == 0)
	return BGP_PEER_INTERNAL;

      if (peer->local_as == peer->as)
	{
	  /* Same AS as the confed id means a plain external session. */
	  if (peer->local_as == bgp->confed_id)
	    return BGP_PEER_EBGP;
	  else
	    return BGP_PEER_IBGP;
	}

      if (bgp_confederation_peers_check (bgp, peer->as))
	return BGP_PEER_CONFED;

      return BGP_PEER_EBGP;
    }
  else
    {
      return (peer->local_as == 0
	      ? BGP_PEER_INTERNAL : peer->local_as == peer->as
	      ? BGP_PEER_IBGP : BGP_PEER_EBGP);
    }
}
/* Final destruction of a peer whose refcount hit zero: stop timers and
   I/O, free owned buffers and strings, drop the bgp reference.  Only
   legal on a peer already in Deleted state. */
static void
peer_free (struct peer *peer)
{
  assert (peer->status == Deleted);

  bgp_unlock(peer->bgp);

  /* this /ought/ to have been done already through bgp_stop earlier,
   * but just to be sure..
   */
  bgp_timer_set (peer);
  BGP_READ_OFF (peer->t_read);
  BGP_WRITE_OFF (peer->t_write);
  BGP_EVENT_FLUSH (peer);

  if (peer->desc)
    XFREE (MTYPE_PEER_DESC, peer->desc);

  /* Free allocated host character. */
  if (peer->host)
    XFREE (MTYPE_BGP_PEER_HOST, peer->host);

  /* Update source configuration.  */
  if (peer->update_source)
    sockunion_free (peer->update_source);

  if (peer->update_if)
    XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);

  if (peer->clear_node_queue)
    work_queue_free (peer->clear_node_queue);

  bgp_sync_delete (peer);
  /* Poison the struct before freeing to catch use-after-free. */
  memset (peer, 0, sizeof (struct peer));
  XFREE (MTYPE_BGP_PEER, peer);
}
/* increase reference count on a struct peer */
/* Take a reference on the peer and return it. */
struct peer *
peer_lock (struct peer *peer)
{
  assert (peer && (peer->lock >= 0));

  peer->lock++;

  return peer;
}
/* decrease reference count on a struct peer
* struct peer is freed and NULL returned if last reference
*/
/* Drop a reference on the peer; frees it and returns NULL when the
   last reference is released, otherwise returns the peer. */
struct peer *
peer_unlock (struct peer *peer)
{
  assert (peer && (peer->lock > 0));

  peer->lock--;

  if (peer->lock == 0)
    {
#if 0
      zlog_debug ("unlocked and freeing");
      zlog_backtrace (LOG_DEBUG);
#endif
      peer_free (peer);
      return NULL;
    }

#if 0
  if (peer->lock == 1)
    {
      zlog_debug ("unlocked to 1");
      zlog_backtrace (LOG_DEBUG);
    }
#endif

  return peer;
}
/* Allocate new peer object, implicitly locked.  */
/* Allocate and initialise a new peer with default timers, flags and
   I/O buffers.  Returned peer carries one (implicit) reference and a
   lock on the owning bgp instance. */
static struct peer *
peer_new (struct bgp *bgp)
{
  afi_t afi;
  safi_t safi;
  struct peer *peer;
  struct servent *sp;

  /* bgp argument is absolutely required */
  assert (bgp);
  if (!bgp)
    return NULL;

  /* Allocate new peer. */
  peer = XCALLOC (MTYPE_BGP_PEER, sizeof (struct peer));

  /* Set default value. */
  peer->fd = -1;
  peer->v_start = BGP_INIT_START_TIMER;
  peer->v_connect = BGP_DEFAULT_CONNECT_RETRY;
  peer->v_asorig = BGP_DEFAULT_ASORIGINATE;
  peer->status = Idle;
  peer->ostatus = Idle;
  peer->weight = 0;
  peer->password = NULL;
  peer->bgp = bgp;
  peer = peer_lock (peer); /* initial reference */
  bgp_lock (bgp);

  /* Set default flags.  */
  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
      {
	if (! bgp_option_check (BGP_OPT_CONFIG_CISCO))
	  {
	    SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY);
	    SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY);
	  }
	peer->orf_plist[afi][safi] = NULL;
      }
  SET_FLAG (peer->sflags, PEER_STATUS_CAPABILITY_OPEN);

  /* Create buffers.  */
  peer->ibuf = stream_new (BGP_MAX_PACKET_SIZE);
  peer->obuf = stream_fifo_new ();
  peer->work = stream_new (BGP_MAX_PACKET_SIZE);

  bgp_sync_init (peer);

  /* Get service port number, falling back to the well-known default. */
  sp = getservbyname ("bgp", "tcp");
  peer->port = (sp == NULL) ? BGP_PORT_DEFAULT : ntohs (sp->s_port);

  return peer;
}
/* Create new BGP peer. */
/* Create a configured peer for the given remote address and AS pair,
   add it to the instance's sorted peer list, and start its FSM timers
   if the peer just became active. */
static struct peer *
peer_create (union sockunion *su, struct bgp *bgp, as_t local_as,
	     as_t remote_as, afi_t afi, safi_t safi)
{
  int active;
  struct peer *peer;
  char buf[SU_ADDRSTRLEN];

  peer = peer_new (bgp);
  peer->su = *su;
  peer->local_as = local_as;
  peer->as = remote_as;
  peer->local_id = bgp->router_id;
  peer->v_holdtime = bgp->default_holdtime;
  peer->v_keepalive = bgp->default_keepalive;
  if (peer_sort (peer) == BGP_PEER_IBGP)
    peer->v_routeadv = BGP_DEFAULT_IBGP_ROUTEADV;
  else
    peer->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;
  peer = peer_lock (peer); /* bgp peer list reference */
  listnode_add_sort (bgp->peer, peer);

  /* Remember activity before enabling the AF (see timer start below). */
  active = peer_active (peer);

  if (afi && safi)
    peer->afc[afi][safi] = 1;

  /* Last read and reset time set */
  peer->readtime = peer->resettime = bgp_clock ();

  /* Default TTL set. */
  peer->ttl = (peer_sort (peer) == BGP_PEER_IBGP ? 255 : 1);

  /* Make peer's address string. */
  sockunion2str (su, buf, SU_ADDRSTRLEN);
  peer->host = XSTRDUP (MTYPE_BGP_PEER_HOST, buf);

  /* Set up peer's events and timers only on the inactive->active edge. */
  if (! active && peer_active (peer))
    bgp_timer_set (peer);

  return peer;
}
/* Make accept BGP peer. Called from bgp_accept (). */
/* Make accept BGP peer.  Called from bgp_accept () for an incoming
   connection; the peer gets defaults only and joins the peer list. */
struct peer *
peer_create_accept (struct bgp *bgp)
{
  struct peer *peer;

  peer = peer_new (bgp);
  peer = peer_lock (peer); /* bgp peer list reference */
  listnode_add_sort (bgp->peer, peer);

  return peer;
}
/* Change peer's AS number. */
/* Change peer's remote AS number: stop the session, recompute local
   AS, and reset type-dependent defaults (routeadv interval, TTL,
   reflector-client flags, local-as override). */
static void
peer_as_change (struct peer *peer, as_t as)
{
  int type;

  /* Stop peer. */
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->status == Established)
	{
	  peer->last_reset = PEER_DOWN_REMOTE_AS_CHANGE;
	  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
			   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
	}
      else
	BGP_EVENT_ADD (peer, BGP_Stop);
    }
  /* Remember the pre-change type for the TTL logic below. */
  type = peer_sort (peer);
  peer->as = as;

  /* A non-confed, non-local remote AS uses the confed id as local AS. */
  if (bgp_config_check (peer->bgp, BGP_CONFIG_CONFEDERATION)
      && ! bgp_confederation_peers_check (peer->bgp, as)
      && peer->bgp->as != as)
    peer->local_as = peer->bgp->confed_id;
  else
    peer->local_as = peer->bgp->as;

  /* Advertisement-interval reset */
  if (peer_sort (peer) == BGP_PEER_IBGP)
    peer->v_routeadv = BGP_DEFAULT_IBGP_ROUTEADV;
  else
    peer->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;

  /* TTL reset */
  if (peer_sort (peer) == BGP_PEER_IBGP)
    peer->ttl = 255;
  else if (type == BGP_PEER_IBGP)
    /* Was IBGP, no longer is: fall back to the EBGP default TTL. */
    peer->ttl = 1;

  /* reflector-client reset */
  if (peer_sort (peer) != BGP_PEER_IBGP)
    {
      UNSET_FLAG (peer->af_flags[AFI_IP][SAFI_UNICAST],
		  PEER_FLAG_REFLECTOR_CLIENT);
      UNSET_FLAG (peer->af_flags[AFI_IP][SAFI_MULTICAST],
		  PEER_FLAG_REFLECTOR_CLIENT);
      UNSET_FLAG (peer->af_flags[AFI_IP][SAFI_MPLS_VPN],
		  PEER_FLAG_REFLECTOR_CLIENT);
      UNSET_FLAG (peer->af_flags[AFI_IP6][SAFI_UNICAST],
		  PEER_FLAG_REFLECTOR_CLIENT);
      UNSET_FLAG (peer->af_flags[AFI_IP6][SAFI_MULTICAST],
		  PEER_FLAG_REFLECTOR_CLIENT);
    }

  /* local-as reset */
  if (peer_sort (peer) != BGP_PEER_EBGP)
    {
      peer->change_local_as = 0;
      UNSET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);
    }
}
/* If peer does not exist, create new one. If peer already exists,
set AS number to the peer. */
/* Configure (or create) a neighbor with the given remote AS.  On a
   peer-group conflict, *as is overwritten with the conflicting AS and
   a BGP_ERR_* code is returned; otherwise returns 0. */
int
peer_remote_as (struct bgp *bgp, union sockunion *su, as_t *as,
		afi_t afi, safi_t safi)
{
  struct peer *peer;
  as_t local_as;

  peer = peer_lookup (bgp, su);

  if (peer)
    {
      /* When this peer is a member of peer-group.  */
      if (peer->group)
	{
	  if (peer->group->conf->as)
	    {
	      /* Return peer group's AS number.  */
	      *as = peer->group->conf->as;
	      return BGP_ERR_PEER_GROUP_MEMBER;
	    }
	  /* Member's IBGP/EBGP type must match the group's type. */
	  if (peer_sort (peer->group->conf) == BGP_PEER_IBGP)
	    {
	      if (bgp->as != *as)
		{
		  *as = peer->as;
		  return BGP_ERR_PEER_GROUP_PEER_TYPE_DIFFERENT;
		}
	    }
	  else
	    {
	      if (bgp->as == *as)
		{
		  *as = peer->as;
		  return BGP_ERR_PEER_GROUP_PEER_TYPE_DIFFERENT;
		}
	    }
	}

      /* Existing peer's AS number change. */
      if (peer->as != *as)
	peer_as_change (peer, *as);
    }
  else
    {
      /* If the peer is not part of our confederation, and its not an
	 iBGP peer then spoof the source AS */
      if (bgp_config_check (bgp, BGP_CONFIG_CONFEDERATION)
	  && ! bgp_confederation_peers_check (bgp, *as)
	  && bgp->as != *as)
	local_as = bgp->confed_id;
      else
	local_as = bgp->as;

      /* If this is IPv4 unicast configuration and "no bgp default
	 ipv4-unicast" is specified. */
      if (bgp_flag_check (bgp, BGP_FLAG_NO_DEFAULT_IPV4)
	  && afi == AFI_IP && safi == SAFI_UNICAST)
	peer = peer_create (su, bgp, local_as, *as, 0, 0);
      else
	peer = peer_create (su, bgp, local_as, *as, afi, safi);
    }

  return 0;
}
/* Activate the peer or peer group for specified AFI and SAFI. */
/* Activate the peer or peer group for specified AFI and SAFI.  For a
   live session this either renegotiates via the dynamic-capability
   MP announcement or resets the session.  Returns 0. */
int
peer_activate (struct peer *peer, afi_t afi, safi_t safi)
{
  int active;

  /* Already activated: nothing to do. */
  if (peer->afc[afi][safi])
    return 0;

  /* Activate the address family configuration. */
  if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    peer->afc[afi][safi] = 1;
  else
    {
      active = peer_active (peer);

      peer->afc[afi][safi] = 1;

      /* Peer just became active: start its timers. */
      if (! active && peer_active (peer))
	bgp_timer_set (peer);
      else
	{
	  if (peer->status == Established)
	    {
	      if (CHECK_FLAG (peer->cap, PEER_CAP_DYNAMIC_RCV))
		{
		  /* Announce the new AF without dropping the session. */
		  peer->afc_adv[afi][safi] = 1;
		  bgp_capability_send (peer, afi, safi,
				       CAPABILITY_CODE_MP,
				       CAPABILITY_ACTION_SET);
		  if (peer->afc_recv[afi][safi])
		    {
		      peer->afc_nego[afi][safi] = 1;
		      bgp_announce_route (peer, afi, safi);
		    }
		}
	      else
		{
		  /* No dynamic capability: reset to renegotiate. */
		  peer->last_reset = PEER_DOWN_AF_ACTIVATE;
		  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
				   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
		}
	    }
	}
    }
  return 0;
}
/* Deactivate an (afi,safi) on a peer or peer-group.  Refuses when a
   group member (or the peer itself) is bound to a group in that AF.
   Returns 0 on success or a BGP_ERR_* code. */
int
peer_deactivate (struct peer *peer, afi_t afi, safi_t safi)
{
  struct peer_group *group;
  struct peer *peer1;
  struct listnode *node, *nnode;

  if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      group = peer->group;

      /* A group AF cannot go away while members still use it. */
      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer1))
	{
	  if (peer1->af_group[afi][safi])
	    return BGP_ERR_PEER_GROUP_MEMBER_EXISTS;
	}
    }
  else
    {
      if (peer->af_group[afi][safi])
	return BGP_ERR_PEER_BELONGS_TO_GROUP;
    }

  if (! peer->afc[afi][safi])
    return 0;

  /* De-activate the address family configuration. */
  peer->afc[afi][safi] = 0;
  peer_af_flag_reset (peer, afi, safi);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->status == Established)
	{
	  if (CHECK_FLAG (peer->cap, PEER_CAP_DYNAMIC_RCV))
	    {
	      peer->afc_adv[afi][safi] = 0;
	      peer->afc_nego[afi][safi] = 0;

	      if (peer_active_nego (peer))
		{
		  /* Other AFs remain: withdraw just this one. */
		  bgp_capability_send (peer, afi, safi,
				       CAPABILITY_CODE_MP,
				       CAPABILITY_ACTION_UNSET);
		  bgp_clear_route (peer, afi, safi, BGP_CLEAR_ROUTE_NORMAL);
		  peer->pcount[afi][safi] = 0;
		}
	      else
		{
		  /* Last negotiated AF removed: reset the session. */
		  peer->last_reset = PEER_DOWN_NEIGHBOR_DELETE;
		  bgp_notify_send (peer, BGP_NOTIFY_CEASE,
				   BGP_NOTIFY_CEASE_CONFIG_CHANGE);
		}
	    }
	  else
	    {
	      peer->last_reset = PEER_DOWN_NEIGHBOR_DELETE;
	      bgp_notify_send (peer, BGP_NOTIFY_CEASE,
			       BGP_NOTIFY_CEASE_CONFIG_CHANGE);
	    }
	}
    }
  return 0;
}
/* Abort graceful-restart (NSF) handling for a peer: clear NSF state,
   cancel restart/stale timers and flush all routes learnt from it. */
static void
peer_nsf_stop (struct peer *peer)
{
  afi_t afi;
  safi_t safi;

  UNSET_FLAG (peer->sflags, PEER_STATUS_NSF_WAIT);
  UNSET_FLAG (peer->sflags, PEER_STATUS_NSF_MODE);

  for (afi = AFI_IP ; afi < AFI_MAX ; afi++)
    for (safi = SAFI_UNICAST ; safi < SAFI_RESERVED_3 ; safi++)
      peer->nsf[afi][safi] = 0;

  if (peer->t_gr_restart)
    {
      BGP_TIMER_OFF (peer->t_gr_restart);
      if (BGP_DEBUG (events, EVENTS))
	zlog_debug ("%s graceful restart timer stopped", peer->host);
    }
  if (peer->t_gr_stale)
    {
      BGP_TIMER_OFF (peer->t_gr_stale);
      if (BGP_DEBUG (events, EVENTS))
	zlog_debug ("%s graceful restart stalepath timer stopped", peer->host);
    }
  bgp_clear_route_all (peer);
}
/* Delete peer from configuration.
*
* The peer is moved to a dead-end "Deleted" neighbour-state, to allow
* it to "cool off" and refcounts to hit 0, at which state it is freed.
*
* This function /should/ take care to be idempotent, to guard against
* it being called multiple times through stray events that come in
* that happen to result in this function being called again. That
* said, getting here for a "Deleted" peer is a bug in the neighbour
* FSM.
*/
/* Remove a peer from configuration: detach it from its group and all
   peer lists, stop the session, and free per-peer resources.  The
   struct itself is freed later when the last reference is dropped
   (see the block comment above).  Returns 0. */
int
peer_delete (struct peer *peer)
{
  int i;
  afi_t afi;
  safi_t safi;
  struct bgp *bgp;
  struct bgp_filter *filter;
  struct listnode *pn;

  /* Calling this twice on the same peer is an FSM bug. */
  assert (peer->status != Deleted);

  bgp = peer->bgp;

  if (CHECK_FLAG (peer->sflags, PEER_STATUS_NSF_WAIT))
    peer_nsf_stop (peer);

  /* If this peer belongs to peer group, clear up the
     relationship.  */
  if (peer->group)
    {
      if ((pn = listnode_lookup (peer->group->peer, peer)))
	{
	  peer = peer_unlock (peer); /* group->peer list reference */
	  list_delete_node (peer->group->peer, pn);
	}
      peer->group = NULL;
    }

  /* Withdraw all information from routing table.  We can not use
   * BGP_EVENT_ADD (peer, BGP_Stop) at here.  Because the event is
   * executed after peer structure is deleted.
   */
  peer->last_reset = PEER_DOWN_NEIGHBOR_DELETE;
  bgp_stop (peer);
  bgp_fsm_change_status (peer, Deleted);

  /* Password configuration */
  if (peer->password)
    {
      XFREE (MTYPE_PEER_PASSWORD, peer->password);
      peer->password = NULL;

      /* Drop the kernel MD5 key for real (non-group) peers. */
      if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
	bgp_md5_set (peer);
    }

  bgp_timer_set (peer); /* stops all timers for Deleted */

  /* Delete from all peer list. */
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)
      && (pn = listnode_lookup (bgp->peer, peer)))
    {
      peer_unlock (peer); /* bgp peer list reference */
      list_delete_node (bgp->peer, pn);
    }

  if (peer_rsclient_active (peer)
      && (pn = listnode_lookup (bgp->rsclient, peer)))
    {
      peer_unlock (peer); /* rsclient list reference */
      list_delete_node (bgp->rsclient, pn);

      /* Clear our own rsclient ribs. */
      for (afi = AFI_IP; afi < AFI_MAX; afi++)
	for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
	  if (CHECK_FLAG(peer->af_flags[afi][safi],
			 PEER_FLAG_RSERVER_CLIENT))
	    bgp_clear_route (peer, afi, safi, BGP_CLEAR_ROUTE_MY_RSCLIENT);
    }

  /* Free RIB for any family in which peer is RSERVER_CLIENT, and is not
     member of a peer_group. */
  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
      if (peer->rib[afi][safi] && ! peer->af_group[afi][safi])
	bgp_table_finish (&peer->rib[afi][safi]);

  /* Buffers.  */
  if (peer->ibuf)
    stream_free (peer->ibuf);
  if (peer->obuf)
    stream_fifo_free (peer->obuf);
  if (peer->work)
    stream_free (peer->work);
  peer->obuf = NULL;
  peer->work = peer->ibuf = NULL;

  /* Local and remote addresses. */
  if (peer->su_local)
    sockunion_free (peer->su_local);
  if (peer->su_remote)
    sockunion_free (peer->su_remote);
  peer->su_local = peer->su_remote = NULL;

  /* Free filter related memory.  */
  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
      {
	filter = &peer->filter[afi][safi];

	for (i = FILTER_IN; i < FILTER_MAX; i++)
	  {
	    if (filter->dlist[i].name)
	      free (filter->dlist[i].name);
	    if (filter->plist[i].name)
	      free (filter->plist[i].name);
	    if (filter->aslist[i].name)
	      free (filter->aslist[i].name);

	    filter->dlist[i].name = NULL;
	    filter->plist[i].name = NULL;
	    filter->aslist[i].name = NULL;
	  }
	for (i = RMAP_IN; i < RMAP_MAX; i++)
	  {
	    if (filter->map[i].name)
	      free (filter->map[i].name);
	    filter->map[i].name = NULL;
	  }

	if (filter->usmap.name)
	  free (filter->usmap.name);

	if (peer->default_rmap[afi][safi].name)
	  free (peer->default_rmap[afi][safi].name);

	filter->usmap.name = NULL;
	peer->default_rmap[afi][safi].name = NULL;
      }

  peer_unlock (peer); /* initial reference */

  return 0;
}
/* Ordering callback for the bgp->group sorted list: peer-groups are
   kept in lexicographic order of their names. */
static int
peer_group_cmp (struct peer_group *g1, struct peer_group *g2)
{
  const char *n1 = g1->name;
  const char *n2 = g2->name;

  return strcmp (n1, n2);
}
/* Return 1 when the peer is bound to a peer-group in at least one
   address family / sub-address family, 0 otherwise. */
static int
peer_group_active (struct peer *peer)
{
  if (peer->af_group[AFI_IP][SAFI_UNICAST])
    return 1;
  if (peer->af_group[AFI_IP][SAFI_MULTICAST])
    return 1;
  if (peer->af_group[AFI_IP][SAFI_MPLS_VPN])
    return 1;
  if (peer->af_group[AFI_IP6][SAFI_UNICAST])
    return 1;
  if (peer->af_group[AFI_IP6][SAFI_MULTICAST])
    return 1;

  return 0;
}
/* Peer group configuration: allocate a zero-initialized peer_group. */
static struct peer_group *
peer_group_new (void)
{
  struct peer_group *group;

  group = XCALLOC (MTYPE_PEER_GROUP, sizeof (struct peer_group));
  return group;
}
/* Release a peer_group allocated by peer_group_new().  Members and
   the group's conf peer must already have been torn down by the
   caller (see peer_group_delete). */
static void
peer_group_free (struct peer_group *group)
{
  XFREE (MTYPE_PEER_GROUP, group);
}
/* Look up a peer-group by name within the given BGP instance.
   Returns the group, or NULL when no group with that name exists. */
struct peer_group *
peer_group_lookup (struct bgp *bgp, const char *name)
{
  struct peer_group *g;
  struct listnode *node, *nnode;

  for (ALL_LIST_ELEMENTS (bgp->group, node, nnode, g))
    if (strcmp (g->name, name) == 0)
      return g;

  return NULL;
}
/* Return the peer-group named NAME in BGP, creating it (with default
   configuration and an empty member list) when it does not yet exist.
   The new group is inserted into bgp->group in sorted order.

   BUG FIX: the creation path previously ended with "return 0;", so a
   caller received NULL for a group that was in fact created and
   registered -- the lookup path correctly returns the group, and the
   creation path must do the same. */
struct peer_group *
peer_group_get (struct bgp *bgp, const char *name)
{
  struct peer_group *group;

  group = peer_group_lookup (bgp, name);
  if (group)
    return group;

  group = peer_group_new ();
  group->bgp = bgp;
  group->name = strdup (name);
  group->peer = list_new ();
  /* group->conf is a template peer holding the group's configuration. */
  group->conf = peer_new (bgp);
  if (! bgp_flag_check (bgp, BGP_FLAG_NO_DEFAULT_IPV4))
    group->conf->afc[AFI_IP][SAFI_UNICAST] = 1;
  group->conf->host = XSTRDUP (MTYPE_BGP_PEER_HOST, name);
  group->conf->group = group;
  group->conf->as = 0;
  group->conf->ttl = 1;
  group->conf->gtsm_hops = 0;
  group->conf->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;
  UNSET_FLAG (group->conf->config, PEER_CONFIG_TIMER);
  UNSET_FLAG (group->conf->config, PEER_CONFIG_CONNECT);
  group->conf->keepalive = 0;
  group->conf->holdtime = 0;
  group->conf->connect = 0;
  SET_FLAG (group->conf->sflags, PEER_STATUS_GROUP);
  listnode_add_sort (bgp->group, group);

  return group;
}
/* Copy the configuration stored in GROUP's template peer (group->conf)
   onto PEER for one AFI/SAFI: remote-as, local-as, TTL/GTSM, weight,
   flags, timers, password, maximum-prefix, allowas-in, update-source
   and all filters/route-maps.  Inbound filters and the import/export
   maps marked "&& ! pfilter->..." are inherited only when the peer has
   no setting of its own; outbound filters are always overwritten by
   the group's values. */
static void
peer_group2peer_config_copy (struct peer_group *group, struct peer *peer,
                             afi_t afi, safi_t safi)
{
  int in = FILTER_IN;
  int out = FILTER_OUT;
  struct peer *conf;
  struct bgp_filter *pfilter;
  struct bgp_filter *gfilter;

  conf = group->conf;
  pfilter = &peer->filter[afi][safi];
  gfilter = &conf->filter[afi][safi];

  /* remote-as */
  if (conf->as)
    peer->as = conf->as;

  /* local-as */
  if (conf->change_local_as)
    peer->change_local_as = conf->change_local_as;

  /* TTL */
  peer->ttl = conf->ttl;

  /* GTSM hops */
  peer->gtsm_hops = conf->gtsm_hops;

  /* Weight */
  peer->weight = conf->weight;

  /* peer flags apply */
  peer->flags = conf->flags;
  /* peer af_flags apply */
  peer->af_flags[afi][safi] = conf->af_flags[afi][safi];
  /* peer config apply */
  peer->config = conf->config;

  /* peer timers apply */
  peer->holdtime = conf->holdtime;
  peer->keepalive = conf->keepalive;
  peer->connect = conf->connect;
  if (CHECK_FLAG (conf->config, PEER_CONFIG_CONNECT))
    peer->v_connect = conf->connect;
  else
    peer->v_connect = BGP_DEFAULT_CONNECT_RETRY;

  /* advertisement-interval reset */
  if (peer_sort (peer) == BGP_PEER_IBGP)
    peer->v_routeadv = BGP_DEFAULT_IBGP_ROUTEADV;
  else
    peer->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;

  /* password apply */
  if (peer->password)
    XFREE (MTYPE_PEER_PASSWORD, peer->password);

  if (conf->password)
    peer->password = XSTRDUP (MTYPE_PEER_PASSWORD, conf->password);
  else
    peer->password = NULL;

  /* Reinstall the TCP MD5 option with the (possibly new) password. */
  bgp_md5_set (peer);

  /* maximum-prefix */
  peer->pmax[afi][safi] = conf->pmax[afi][safi];
  peer->pmax_threshold[afi][safi] = conf->pmax_threshold[afi][safi];
  peer->pmax_restart[afi][safi] = conf->pmax_restart[afi][safi];

  /* allowas-in */
  peer->allowas_in[afi][safi] = conf->allowas_in[afi][safi];

  /* route-server-client */
  if (CHECK_FLAG(conf->af_flags[afi][safi], PEER_FLAG_RSERVER_CLIENT))
    {
      /* Make peer's RIB point to group's RIB. */
      peer->rib[afi][safi] = group->conf->rib[afi][safi];

      /* Import policy. */
      if (pfilter->map[RMAP_IMPORT].name)
        free (pfilter->map[RMAP_IMPORT].name);
      if (gfilter->map[RMAP_IMPORT].name)
        {
          pfilter->map[RMAP_IMPORT].name = strdup (gfilter->map[RMAP_IMPORT].name);
          pfilter->map[RMAP_IMPORT].map = gfilter->map[RMAP_IMPORT].map;
        }
      else
        {
          pfilter->map[RMAP_IMPORT].name = NULL;
          pfilter->map[RMAP_IMPORT].map = NULL;
        }

      /* Export policy.  Only inherited when the peer has none. */
      if (gfilter->map[RMAP_EXPORT].name && ! pfilter->map[RMAP_EXPORT].name)
        {
          pfilter->map[RMAP_EXPORT].name = strdup (gfilter->map[RMAP_EXPORT].name);
          pfilter->map[RMAP_EXPORT].map = gfilter->map[RMAP_EXPORT].map;
        }
    }

  /* default-originate route-map */
  if (conf->default_rmap[afi][safi].name)
    {
      if (peer->default_rmap[afi][safi].name)
        free (peer->default_rmap[afi][safi].name);
      peer->default_rmap[afi][safi].name = strdup (conf->default_rmap[afi][safi].name);
      peer->default_rmap[afi][safi].map = conf->default_rmap[afi][safi].map;
    }

  /* update-source apply.  Address and interface forms are mutually
     exclusive, so taking one always clears the other. */
  if (conf->update_source)
    {
      if (peer->update_source)
        sockunion_free (peer->update_source);
      if (peer->update_if)
        {
          XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
          peer->update_if = NULL;
        }
      peer->update_source = sockunion_dup (conf->update_source);
    }
  else if (conf->update_if)
    {
      if (peer->update_if)
        XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
      if (peer->update_source)
        {
          sockunion_free (peer->update_source);
          peer->update_source = NULL;
        }
      peer->update_if = XSTRDUP (MTYPE_PEER_UPDATE_SOURCE, conf->update_if);
    }

  /* inbound filter apply: inherit only when the peer has no own
     setting (the inner "if" is then always true, kept for symmetry). */
  if (gfilter->dlist[in].name && ! pfilter->dlist[in].name)
    {
      if (pfilter->dlist[in].name)
        free (pfilter->dlist[in].name);
      pfilter->dlist[in].name = strdup (gfilter->dlist[in].name);
      pfilter->dlist[in].alist = gfilter->dlist[in].alist;
    }
  if (gfilter->plist[in].name && ! pfilter->plist[in].name)
    {
      if (pfilter->plist[in].name)
        free (pfilter->plist[in].name);
      pfilter->plist[in].name = strdup (gfilter->plist[in].name);
      pfilter->plist[in].plist = gfilter->plist[in].plist;
    }
  if (gfilter->aslist[in].name && ! pfilter->aslist[in].name)
    {
      if (pfilter->aslist[in].name)
        free (pfilter->aslist[in].name);
      pfilter->aslist[in].name = strdup (gfilter->aslist[in].name);
      pfilter->aslist[in].aslist = gfilter->aslist[in].aslist;
    }
  if (gfilter->map[RMAP_IN].name && ! pfilter->map[RMAP_IN].name)
    {
      if (pfilter->map[RMAP_IN].name)
        free (pfilter->map[RMAP_IN].name);
      pfilter->map[RMAP_IN].name = strdup (gfilter->map[RMAP_IN].name);
      pfilter->map[RMAP_IN].map = gfilter->map[RMAP_IN].map;
    }

  /* outbound filter apply: always replaced by the group's setting. */
  if (gfilter->dlist[out].name)
    {
      if (pfilter->dlist[out].name)
        free (pfilter->dlist[out].name);
      pfilter->dlist[out].name = strdup (gfilter->dlist[out].name);
      pfilter->dlist[out].alist = gfilter->dlist[out].alist;
    }
  else
    {
      if (pfilter->dlist[out].name)
        free (pfilter->dlist[out].name);
      pfilter->dlist[out].name = NULL;
      pfilter->dlist[out].alist = NULL;
    }
  if (gfilter->plist[out].name)
    {
      if (pfilter->plist[out].name)
        free (pfilter->plist[out].name);
      pfilter->plist[out].name = strdup (gfilter->plist[out].name);
      pfilter->plist[out].plist = gfilter->plist[out].plist;
    }
  else
    {
      if (pfilter->plist[out].name)
        free (pfilter->plist[out].name);
      pfilter->plist[out].name = NULL;
      pfilter->plist[out].plist = NULL;
    }
  if (gfilter->aslist[out].name)
    {
      if (pfilter->aslist[out].name)
        free (pfilter->aslist[out].name);
      pfilter->aslist[out].name = strdup (gfilter->aslist[out].name);
      pfilter->aslist[out].aslist = gfilter->aslist[out].aslist;
    }
  else
    {
      if (pfilter->aslist[out].name)
        free (pfilter->aslist[out].name);
      pfilter->aslist[out].name = NULL;
      pfilter->aslist[out].aslist = NULL;
    }
  if (gfilter->map[RMAP_OUT].name)
    {
      if (pfilter->map[RMAP_OUT].name)
        free (pfilter->map[RMAP_OUT].name);
      pfilter->map[RMAP_OUT].name = strdup (gfilter->map[RMAP_OUT].name);
      pfilter->map[RMAP_OUT].map = gfilter->map[RMAP_OUT].map;
    }
  else
    {
      if (pfilter->map[RMAP_OUT].name)
        free (pfilter->map[RMAP_OUT].name);
      pfilter->map[RMAP_OUT].name = NULL;
      pfilter->map[RMAP_OUT].map = NULL;
    }

  /* RS-client's import/export route-maps. */
  if (gfilter->map[RMAP_IMPORT].name)
    {
      if (pfilter->map[RMAP_IMPORT].name)
        free (pfilter->map[RMAP_IMPORT].name);
      pfilter->map[RMAP_IMPORT].name = strdup (gfilter->map[RMAP_IMPORT].name);
      pfilter->map[RMAP_IMPORT].map = gfilter->map[RMAP_IMPORT].map;
    }
  else
    {
      if (pfilter->map[RMAP_IMPORT].name)
        free (pfilter->map[RMAP_IMPORT].name);
      pfilter->map[RMAP_IMPORT].name = NULL;
      pfilter->map[RMAP_IMPORT].map = NULL;
    }
  if (gfilter->map[RMAP_EXPORT].name && ! pfilter->map[RMAP_EXPORT].name)
    {
      if (pfilter->map[RMAP_EXPORT].name)
        free (pfilter->map[RMAP_EXPORT].name);
      pfilter->map[RMAP_EXPORT].name = strdup (gfilter->map[RMAP_EXPORT].name);
      pfilter->map[RMAP_EXPORT].map = gfilter->map[RMAP_EXPORT].map;
    }

  if (gfilter->usmap.name)
    {
      if (pfilter->usmap.name)
        free (pfilter->usmap.name);
      pfilter->usmap.name = strdup (gfilter->usmap.name);
      pfilter->usmap.map = gfilter->usmap.map;
    }
  else
    {
      if (pfilter->usmap.name)
        free (pfilter->usmap.name);
      pfilter->usmap.name = NULL;
      pfilter->usmap.map = NULL;
    }
}
/* Peer group's remote AS configuration: set *AS as the remote AS of
   the group named GROUP_NAME and of every member peer.
   Returns 0 on success, -1 when the group does not exist. */
int
peer_group_remote_as (struct bgp *bgp, const char *group_name, as_t *as)
{
  struct peer_group *group;
  struct peer *member;
  struct listnode *node, *nnode;

  group = peer_group_lookup (bgp, group_name);
  if (group == NULL)
    return -1;

  /* Nothing to do when the AS is unchanged. */
  if (group->conf->as == *as)
    return 0;

  /* When we setup peer-group AS number all peer group member's AS
     number must be updated to same number. */
  peer_as_change (group->conf, *as);

  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, member))
    if (member->as != *as)
      peer_as_change (member, *as);

  return 0;
}
/* Delete a peer-group: every member peer is detached from the group
   and deleted, then the group's member list, name, template conf peer
   and the group itself are freed.  The group is also removed from its
   BGP instance's group list.  Always returns 0. */
int
peer_group_delete (struct peer_group *group)
{
  struct bgp *bgp;
  struct peer *peer;
  struct listnode *node, *nnode;

  bgp = group->bgp;

  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      /* Detach first so peer_delete() does not touch the group. */
      peer->group = NULL;
      peer_delete (peer);
    }
  list_delete (group->peer);

  free (group->name);
  group->name = NULL;

  group->conf->group = NULL;
  peer_delete (group->conf);

  /* Delete from all peer_group list. */
  listnode_delete (bgp->group, group);

  peer_group_free (group);

  return 0;
}
/* Unset the peer-group's remote AS: delete every member peer (they
   derived their AS from the group) and clear the template's AS.
   A no-op when the group has no remote AS configured. */
int
peer_group_remote_as_delete (struct peer_group *group)
{
  struct peer *member;
  struct listnode *node, *nnode;

  if (group->conf->as == 0)
    return 0;

  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, member))
    {
      member->group = NULL;
      peer_delete (member);
    }
  list_delete_all_node (group->peer);

  group->conf->as = 0;

  return 0;
}
/* Bind specified peer to peer group.  Creates the peer when it does
   not exist yet (requires the group to have a remote AS), enforces
   the consistency rules between peer and group (same group, same
   peer sort), copies the group configuration onto the peer and
   finally resets the session so the new configuration takes effect.
   Returns 0 or a BGP_ERR_* code. */
int
peer_group_bind (struct bgp *bgp, union sockunion *su,
                 struct peer_group *group, afi_t afi, safi_t safi, as_t *as)
{
  struct peer *peer;
  int first_member = 0;

  /* Check peer group's address family. */
  if (! group->conf->afc[afi][safi])
    return BGP_ERR_PEER_GROUP_AF_UNCONFIGURED;

  /* Lookup the peer. */
  peer = peer_lookup (bgp, su);

  /* Create a new peer. */
  if (! peer)
    {
      if (! group->conf->as)
        return BGP_ERR_PEER_GROUP_NO_REMOTE_AS;

      peer = peer_create (su, bgp, bgp->as, group->conf->as, afi, safi);
      peer->group = group;
      peer->af_group[afi][safi] = 1;

      peer = peer_lock (peer); /* group->peer list reference */
      listnode_add (group->peer, peer);
      peer_group2peer_config_copy (group, peer, afi, safi);

      return 0;
    }

  /* When the peer already belongs to peer group, check the consistency.  */
  if (peer->af_group[afi][safi])
    {
      if (strcmp (peer->group->name, group->name) != 0)
        return BGP_ERR_PEER_GROUP_CANT_CHANGE;

      return 0;
    }

  /* Check current peer group configuration.  A peer may belong to
     only one group across all address families. */
  if (peer_group_active (peer)
      && strcmp (peer->group->name, group->name) != 0)
    return BGP_ERR_PEER_GROUP_MISMATCH;

  if (! group->conf->as)
    {
      /* Group without remote-as inherits its sort from the first
         member; later members must be of the same sort. */
      if (peer_sort (group->conf) != BGP_PEER_INTERNAL
          && peer_sort (group->conf) != peer_sort (peer))
        {
          if (as)
            *as = peer->as;
          return BGP_ERR_PEER_GROUP_PEER_TYPE_DIFFERENT;
        }

      if (peer_sort (group->conf) == BGP_PEER_INTERNAL)
        first_member = 1;
    }

  peer->af_group[afi][safi] = 1;
  peer->afc[afi][safi] = 1;

  if (! peer->group)
    {
      peer->group = group;

      peer = peer_lock (peer); /* group->peer list reference */
      listnode_add (group->peer, peer);
    }
  else
    assert (group && peer->group == group);

  if (first_member)
    {
      /* Advertisement-interval reset */
      if (peer_sort (group->conf) == BGP_PEER_IBGP)
        group->conf->v_routeadv = BGP_DEFAULT_IBGP_ROUTEADV;
      else
        group->conf->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;

      /* ebgp-multihop reset */
      if (peer_sort (group->conf) == BGP_PEER_IBGP)
        group->conf->ttl = 255;

      /* local-as reset */
      if (peer_sort (group->conf) != BGP_PEER_EBGP)
        {
          group->conf->change_local_as = 0;
          UNSET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);
        }
    }

  if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_RSERVER_CLIENT))
    {
      struct listnode *pn;

      /* If it's not configured as RSERVER_CLIENT in any other address
          family, without being member of a peer_group, remove it from
          list bgp->rsclient.*/
      if (! peer_rsclient_active (peer)
          && (pn = listnode_lookup (bgp->rsclient, peer)))
        {
          peer_unlock (peer); /* peer rsclient reference */
          list_delete_node (bgp->rsclient, pn);

          /* Clear our own rsclient rib for this afi/safi. */
          bgp_clear_route (peer, afi, safi, BGP_CLEAR_ROUTE_MY_RSCLIENT);
        }

      bgp_table_finish (&peer->rib[afi][safi]);

      /* Import policy. */
      if (peer->filter[afi][safi].map[RMAP_IMPORT].name)
        {
          free (peer->filter[afi][safi].map[RMAP_IMPORT].name);
          peer->filter[afi][safi].map[RMAP_IMPORT].name = NULL;
          peer->filter[afi][safi].map[RMAP_IMPORT].map = NULL;
        }

      /* Export policy. */
      if (! CHECK_FLAG(group->conf->af_flags[afi][safi], PEER_FLAG_RSERVER_CLIENT)
              && peer->filter[afi][safi].map[RMAP_EXPORT].name)
        {
          free (peer->filter[afi][safi].map[RMAP_EXPORT].name);
          peer->filter[afi][safi].map[RMAP_EXPORT].name = NULL;
          peer->filter[afi][safi].map[RMAP_EXPORT].map = NULL;
        }
    }

  peer_group2peer_config_copy (group, peer, afi, safi);

  /* Reset the session so the inherited configuration takes effect. */
  if (peer->status == Established)
    {
      peer->last_reset = PEER_DOWN_RMAP_BIND;
      bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                       BGP_NOTIFY_CEASE_CONFIG_CHANGE);
    }
  else
    BGP_EVENT_ADD (peer, BGP_Stop);

  return 0;
}
/* Unbind PEER from GROUP for one AFI/SAFI.  When the peer is no
   longer bound in any address family it is removed from the group's
   member list; peers that got their remote AS from the group are
   deleted outright, others fall back to their global configuration.
   Finally the session is reset.  Returns 0 or a BGP_ERR_* code. */
int
peer_group_unbind (struct bgp *bgp, struct peer *peer,
                   struct peer_group *group, afi_t afi, safi_t safi)
{
  if (! peer->af_group[afi][safi])
      return 0;

  if (group != peer->group)
    return BGP_ERR_PEER_GROUP_MISMATCH;

  peer->af_group[afi][safi] = 0;
  peer->afc[afi][safi] = 0;
  peer_af_flag_reset (peer, afi, safi);

  /* The RIB was shared with the group (see
     peer_group2peer_config_copy), so only drop the reference. */
  if (peer->rib[afi][safi])
    peer->rib[afi][safi] = NULL;

  if (! peer_group_active (peer))
    {
      assert (listnode_lookup (group->peer, peer));
      peer_unlock (peer); /* peer group list reference */
      listnode_delete (group->peer, peer);
      peer->group = NULL;
      if (group->conf->as)
        {
          /* The peer's AS came from the group; it cannot stand alone. */
          peer_delete (peer);
          return 0;
        }
      peer_global_config_reset (peer);
    }

  if (peer->status == Established)
    {
      peer->last_reset = PEER_DOWN_RMAP_UNBIND;
      bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                       BGP_NOTIFY_CEASE_CONFIG_CHANGE);
    }
  else
    BGP_EVENT_ADD (peer, BGP_Stop);

  return 0;
}
/* BGP instance creation by `router bgp' commands.  Allocates and
   initializes a struct bgp for AS *AS (and optional view NAME):
   self peer, peer/group/rsclient lists, per-AFI/SAFI tables and
   default timers.  Returns the new instance with one reference held,
   or NULL when allocation fails. */
static struct bgp *
bgp_create (as_t *as, const char *name)
{
  struct bgp *bgp;
  afi_t afi;
  safi_t safi;

  if ( (bgp = XCALLOC (MTYPE_BGP, sizeof (struct bgp))) == NULL)
    return NULL;

  bgp_lock (bgp);
  bgp->peer_self = peer_new (bgp);
  bgp->peer_self->host = XSTRDUP (MTYPE_BGP_PEER_HOST, "Static announcement");

  bgp->peer = list_new ();
  bgp->peer->cmp = (int (*)(void *, void *)) peer_cmp;

  bgp->group = list_new ();
  bgp->group->cmp = (int (*)(void *, void *)) peer_group_cmp;

  bgp->rsclient = list_new ();
  bgp->rsclient->cmp = (int (*)(void*, void*)) peer_cmp;

  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
      {
        bgp->route[afi][safi] = bgp_table_init (afi, safi);
        bgp->aggregate[afi][safi] = bgp_table_init (afi, safi);
        bgp->rib[afi][safi] = bgp_table_init (afi, safi);
        bgp->maxpaths[afi][safi].maxpaths_ebgp = BGP_DEFAULT_MAXPATHS;
        bgp->maxpaths[afi][safi].maxpaths_ibgp = BGP_DEFAULT_MAXPATHS;
      }

  bgp->default_local_pref = BGP_DEFAULT_LOCAL_PREF;
  bgp->default_holdtime = BGP_DEFAULT_HOLDTIME;
  bgp->default_keepalive = BGP_DEFAULT_KEEPALIVE;
  bgp->restart_time = BGP_DEFAULT_RESTART_TIME;
  bgp->stalepath_time = BGP_DEFAULT_STALEPATH_TIME;

  bgp->as = *as;

  if (name)
    bgp->name = strdup (name);

  return bgp;
}
/* Return the first (default) BGP instance, or NULL when none exist. */
struct bgp *
bgp_get_default (void)
{
  struct bgp *bgp = NULL;

  if (bm->bgp->head)
    bgp = listgetdata (listhead (bm->bgp));
  return bgp;
}
/* Lookup a BGP instance by AS number and (possibly NULL) view name.
   Both names NULL, or both non-NULL and equal, count as a match. */
struct bgp *
bgp_lookup (as_t as, const char *name)
{
  struct bgp *bgp;
  struct listnode *node, *nnode;

  for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp))
    {
      if (bgp->as != as)
        continue;
      if (bgp->name == NULL && name == NULL)
        return bgp;
      if (bgp->name && name && strcmp (bgp->name, name) == 0)
        return bgp;
    }
  return NULL;
}
/* Lookup a BGP instance by view name alone (NULL matches the
   unnamed default instance). */
struct bgp *
bgp_lookup_by_name (const char *name)
{
  struct bgp *bgp;
  struct listnode *node, *nnode;

  for (ALL_LIST_ELEMENTS (bm->bgp, node, nnode, bgp))
    {
      if (bgp->name == NULL && name == NULL)
        return bgp;
      if (bgp->name && name && strcmp (bgp->name, name) == 0)
        return bgp;
    }
  return NULL;
}
/* Called from VTY commands.  Find or create the BGP instance for
   *AS/NAME, honouring the multiple-instance option.  On success
   stores the instance in *bgp_val and returns 0; on an AS mismatch
   with an existing instance, *as is overwritten with the existing AS
   and an error code is returned. */
int
bgp_get (struct bgp **bgp_val, as_t *as, const char *name)
{
  struct bgp *bgp;

  /* Multiple instance check. */
  if (bgp_option_check (BGP_OPT_MULTIPLE_INSTANCE))
    {
      if (name)
        bgp = bgp_lookup_by_name (name);
      else
        bgp = bgp_get_default ();

      /* Already exists. */
      if (bgp)
        {
          if (bgp->as != *as)
            {
              *as = bgp->as;
              return BGP_ERR_INSTANCE_MISMATCH;
            }
          *bgp_val = bgp;
          return 0;
        }
    }
  else
    {
      /* BGP instance name can not be specified for single instance.  */
      if (name)
        return BGP_ERR_MULTIPLE_INSTANCE_NOT_SET;

      /* Get default BGP structure if exists. */
      bgp = bgp_get_default ();

      if (bgp)
        {
          if (bgp->as != *as)
            {
              *as = bgp->as;
              return BGP_ERR_AS_MISMATCH;
            }
          *bgp_val = bgp;
          return 0;
        }
    }

  bgp = bgp_create (as, name);
  bgp_router_id_set(bgp, &router_id_kroute);
  *bgp_val = bgp;

  /* Create BGP server socket, if first instance.  */
  if (list_isempty(bm->bgp))
    {
      /* NOTE(review): on socket failure the freshly created instance
         is neither freed nor added to bm->bgp, yet it was already
         stored in *bgp_val -- looks like a leak/inconsistency; confirm
         intended behaviour before changing. */
      if (bgp_socket (bm->port, bm->address) < 0)
        return BGP_ERR_INVALID_VALUE;
    }

  listnode_add (bm->bgp, bgp);

  return 0;
}
/* Delete BGP instance: remove static routes and redistribution,
   delete all peers and peer-groups, detach the instance from the
   master list (closing the server socket when it was the last one)
   and drop the initial reference.  The struct itself is freed by
   bgp_unlock() once all outstanding references are gone. */
int
bgp_delete (struct bgp *bgp)
{
  struct peer *peer;
  struct peer_group *group;
  struct listnode *node;
  struct listnode *next;
  afi_t afi;
  int i;

  /* Delete static route. */
  bgp_static_delete (bgp);

  /* Unset redistribution. */
  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (i = 0; i < KROUTE_ROUTE_MAX; i++)
      if (i != KROUTE_ROUTE_BGP)
        bgp_redistribute_unset (bgp, afi, i);

  for (ALL_LIST_ELEMENTS (bgp->peer, node, next, peer))
    peer_delete (peer);

  for (ALL_LIST_ELEMENTS (bgp->group, node, next, group))
    peer_group_delete (group);

  /* Deleting all peers must have emptied the rsclient list. */
  assert (listcount (bgp->rsclient) == 0);

  if (bgp->peer_self) {
    peer_delete(bgp->peer_self);
    bgp->peer_self = NULL;
  }

  /* Remove visibility via the master list - there may however still be
   * routes to be processed still referencing the struct bgp.
   */
  listnode_delete (bm->bgp, bgp);
  if (list_isempty(bm->bgp))
    bgp_close ();

  bgp_unlock(bgp); /* initial reference */

  return 0;
}
static void bgp_free (struct bgp *);
/* Take a reference on the BGP instance. */
void
bgp_lock (struct bgp *bgp)
{
  bgp->lock++;
}
/* Drop a reference on the BGP instance; the last reference frees it. */
void
bgp_unlock (struct bgp *bgp)
{
  assert (bgp->lock > 0);
  bgp->lock--;
  if (bgp->lock == 0)
    bgp_free (bgp);
}
/* Final destruction of a BGP instance, called from bgp_unlock() when
   the last reference is dropped: free the peer/group/rsclient lists,
   the view name and every per-AFI/SAFI table, then the struct itself. */
static void
bgp_free (struct bgp *bgp)
{
  afi_t afi;
  safi_t safi;

  list_delete (bgp->group);
  list_delete (bgp->peer);
  list_delete (bgp->rsclient);

  if (bgp->name)
    free (bgp->name);

  for (afi = AFI_IP; afi < AFI_MAX; afi++)
    for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
      {
        if (bgp->route[afi][safi])
          bgp_table_finish (&bgp->route[afi][safi]);
        if (bgp->aggregate[afi][safi])
          bgp_table_finish (&bgp->aggregate[afi][safi]) ;
        if (bgp->rib[afi][safi])
          bgp_table_finish (&bgp->rib[afi][safi]);
      }
  XFREE (MTYPE_BGP, bgp);
}
/* Find a configured peer by its socket-union address.  When BGP is
   non-NULL only that instance is searched; otherwise all instances in
   bm->bgp are scanned.  Dynamically accepted peers
   (PEER_STATUS_ACCEPT_PEER) are skipped.  Returns NULL if not found. */
struct peer *
peer_lookup (struct bgp *bgp, union sockunion *su)
{
  struct peer *peer;
  struct listnode *node, *nnode;

  if (bgp != NULL)
    {
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
        if (sockunion_same (&peer->su, su)
            && ! CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER))
          return peer;
    }
  else if (bm->bgp != NULL)
    {
      struct listnode *bgpnode, *nbgpnode;

      for (ALL_LIST_ELEMENTS (bm->bgp, bgpnode, nbgpnode, bgp))
        for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
          if (sockunion_same (&peer->su, su)
              && ! CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER))
            return peer;
    }
  return NULL;
}
/* Find the configured peer matching an incoming OPEN message: same
   address, same AS, and either the exact remote router-id or (second
   pass) a peer with no router-id learned yet.  *AS is set to 1 when a
   peer with the right address/AS exists but the router-id differs.
   Returns NULL when no match is found. */
struct peer *
peer_lookup_with_open (union sockunion *su, as_t remote_as,
                       struct in_addr *remote_id, int *as)
{
  struct peer *peer;
  struct listnode *node;
  struct listnode *bgpnode;
  struct bgp *bgp;

  if (! bm->bgp)
    return NULL;

  for (ALL_LIST_ELEMENTS_RO (bm->bgp, bgpnode, bgp))
    {
      /* First pass: exact router-id match. */
      for (ALL_LIST_ELEMENTS_RO (bgp->peer, node, peer))
        {
          if (sockunion_same (&peer->su, su)
              && ! CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER))
            {
              if (peer->as == remote_as
                  && peer->remote_id.s_addr == remote_id->s_addr)
                return peer;
              if (peer->as == remote_as)
                *as = 1;
            }
        }

      /* Second pass: peer whose router-id is still unset. */
      for (ALL_LIST_ELEMENTS_RO (bgp->peer, node, peer))
        {
          if (sockunion_same (&peer->su, su)
              && ! CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER))
            {
              if (peer->as == remote_as
                  && peer->remote_id.s_addr == 0)
                return peer;
              if (peer->as == remote_as)
                *as = 1;
            }
        }
    }
  return NULL;
}
/* Return 1 when the peer has at least one address family configured,
   0 otherwise. */
int
peer_active (struct peer *peer)
{
  if (peer->afc[AFI_IP][SAFI_UNICAST])
    return 1;
  if (peer->afc[AFI_IP][SAFI_MULTICAST])
    return 1;
  if (peer->afc[AFI_IP][SAFI_MPLS_VPN])
    return 1;
  if (peer->afc[AFI_IP6][SAFI_UNICAST])
    return 1;
  if (peer->afc[AFI_IP6][SAFI_MULTICAST])
    return 1;

  return 0;
}
/* Return 1 when at least one address family was negotiated with the
   peer, 0 otherwise. */
int
peer_active_nego (struct peer *peer)
{
  if (peer->afc_nego[AFI_IP][SAFI_UNICAST])
    return 1;
  if (peer->afc_nego[AFI_IP][SAFI_MULTICAST])
    return 1;
  if (peer->afc_nego[AFI_IP][SAFI_MPLS_VPN])
    return 1;
  if (peer->afc_nego[AFI_IP6][SAFI_UNICAST])
    return 1;
  if (peer->afc_nego[AFI_IP6][SAFI_MULTICAST])
    return 1;

  return 0;
}
/* peer_flag_change_type: what must happen to the session when a
   given peer flag is toggled (see peer_change_action). */
enum peer_change_type
{
  peer_change_none,      /* no session impact */
  peer_change_reset,     /* full session reset */
  peer_change_reset_in,  /* inbound refresh (or reset w/o capability) */
  peer_change_reset_out, /* re-announce outbound routes */
};
/* Apply the session action TYPE to PEER for AFI/SAFI after a flag
   change.  Group template peers (PEER_STATUS_GROUP) carry no live
   session and are skipped. */
static void
peer_change_action (struct peer *peer, afi_t afi, safi_t safi,
                    enum peer_change_type type)
{
  if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return;

  if (type == peer_change_reset)
    bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                     BGP_NOTIFY_CEASE_CONFIG_CHANGE);
  else if (type == peer_change_reset_in)
    {
      /* Prefer a route-refresh over a full reset when the peer
         advertised either form of the refresh capability. */
      if (CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_OLD_RCV)
          || CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_NEW_RCV))
        bgp_route_refresh_send (peer, afi, safi, 0, 0, 0);
      else
        bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                         BGP_NOTIFY_CEASE_CONFIG_CHANGE);
    }
  else if (type == peer_change_reset_out)
    bgp_announce_route (peer, afi, safi);
}
/* Table entry describing how toggling one peer flag affects the
   session; scanned by peer_flag_action_set(). */
struct peer_flag_action
{
  /* Peer's flag.  */
  u_int32_t flag;

  /* This flag can be set for peer-group member.  */
  u_char not_for_member;

  /* Action when the flag is changed.  */
  enum peer_change_type type;

  /* Peer down cause */
  u_char peer_down;
};

/* Session-wide peer flags; list terminated by a zero flag entry. */
static const struct peer_flag_action peer_flag_action_list[] =
  {
    { PEER_FLAG_PASSIVE,                  0, peer_change_reset },
    { PEER_FLAG_SHUTDOWN,                 0, peer_change_reset },
    { PEER_FLAG_DONT_CAPABILITY,          0, peer_change_none },
    { PEER_FLAG_OVERRIDE_CAPABILITY,      0, peer_change_none },
    { PEER_FLAG_STRICT_CAP_MATCH,         0, peer_change_none },
    { PEER_FLAG_DYNAMIC_CAPABILITY,       0, peer_change_reset },
    { PEER_FLAG_DISABLE_CONNECTED_CHECK,  0, peer_change_reset },
    { 0, 0, 0 }
  };

/* Per-address-family peer flags; list terminated by a zero flag entry. */
static const struct peer_flag_action peer_af_flag_action_list[] =
  {
    { PEER_FLAG_NEXTHOP_SELF,             1, peer_change_reset_out },
    { PEER_FLAG_SEND_COMMUNITY,           1, peer_change_reset_out },
    { PEER_FLAG_SEND_EXT_COMMUNITY,       1, peer_change_reset_out },
    { PEER_FLAG_SOFT_RECONFIG,            0, peer_change_reset_in },
    { PEER_FLAG_REFLECTOR_CLIENT,         1, peer_change_reset },
    { PEER_FLAG_RSERVER_CLIENT,           1, peer_change_reset },
    { PEER_FLAG_AS_PATH_UNCHANGED,        1, peer_change_reset_out },
    { PEER_FLAG_NEXTHOP_UNCHANGED,        1, peer_change_reset_out },
    { PEER_FLAG_MED_UNCHANGED,            1, peer_change_reset_out },
    { PEER_FLAG_REMOVE_PRIVATE_AS,        1, peer_change_reset_out },
    { PEER_FLAG_ALLOWAS_IN,               0, peer_change_reset_in },
    { PEER_FLAG_ORF_PREFIX_SM,            1, peer_change_reset },
    { PEER_FLAG_ORF_PREFIX_RM,            1, peer_change_reset },
    { PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED,  0, peer_change_reset_out },
    { 0, 0, 0 }
  };
/* Scan ACTION_LIST (at most SIZE entries, terminated by a zero flag)
   for the bits in FLAG and fill ACTION with the combined reset
   behaviour and the not_for_member restriction.  Returns 1 when at
   least one matching entry was found, 0 otherwise. */
static int
peer_flag_action_set (const struct peer_flag_action *action_list, int size,
                      struct peer_flag_action *action, u_int32_t flag)
{
  int i;
  int found = 0;
  int reset_in = 0;
  int reset_out = 0;

  for (i = 0; i < size && action_list[i].flag != 0; i++)
    {
      const struct peer_flag_action *entry = &action_list[i];

      if (! (entry->flag & flag))
        continue;

      found = 1;

      switch (entry->type)
        {
        case peer_change_reset_in:
          reset_in = 1;
          break;
        case peer_change_reset_out:
          reset_out = 1;
          break;
        case peer_change_reset:
          reset_in = 1;
          reset_out = 1;
          break;
        default:
          break;
        }

      if (entry->not_for_member)
        action->not_for_member = 1;
    }

  /* Collapse the collected directions into a single change type. */
  if (reset_in && reset_out)
    action->type = peer_change_reset;
  else if (reset_in)
    action->type = peer_change_reset_in;
  else if (reset_out)
    action->type = peer_change_reset_out;
  else
    action->type = peer_change_none;

  return found;
}
/* Carry out the session-level consequence of a changed peer flag.
   Shutdown is special-cased (cancel NSF and maximum-prefix restart
   state before sending the ADMIN_SHUTDOWN cease, or restart quickly
   when the shutdown is being cleared); other flags reset an
   Established session with a CONFIG_CHANGE cease. */
static void
peer_flag_modify_action (struct peer *peer, u_int32_t flag)
{
  if (flag == PEER_FLAG_SHUTDOWN)
    {
      if (CHECK_FLAG (peer->flags, flag))
        {
          if (CHECK_FLAG (peer->sflags, PEER_STATUS_NSF_WAIT))
            peer_nsf_stop (peer);

          UNSET_FLAG (peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
          if (peer->t_pmax_restart)
            {
              BGP_TIMER_OFF (peer->t_pmax_restart);
              if (BGP_DEBUG (events, EVENTS))
                zlog_debug ("%s Maximum-prefix restart timer canceled",
                            peer->host);
            }

          /* NOTE(review): this PEER_STATUS_NSF_WAIT check duplicates
             the one a few lines above -- looks redundant; confirm
             before removing. */
          if (CHECK_FLAG (peer->sflags, PEER_STATUS_NSF_WAIT))
            peer_nsf_stop (peer);

          if (peer->status == Established)
            bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                             BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN);
          else
            BGP_EVENT_ADD (peer, BGP_Stop);
        }
      else
        {
          peer->v_start = BGP_INIT_START_TIMER;
          BGP_EVENT_ADD (peer, BGP_Stop);
        }
    }
  else if (peer->status == Established)
    {
      /* Record why the session went down for `show` output. */
      if (flag == PEER_FLAG_DYNAMIC_CAPABILITY)
        peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
      else if (flag == PEER_FLAG_PASSIVE)
        peer->last_reset = PEER_DOWN_PASSIVE_CHANGE;
      else if (flag == PEER_FLAG_DISABLE_CONNECTED_CHECK)
        peer->last_reset = PEER_DOWN_MULTIHOP_CHANGE;

      bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                       BGP_NOTIFY_CEASE_CONFIG_CHANGE);
    }
  else
    BGP_EVENT_ADD (peer, BGP_Stop);
}
/* Change specified peer flag.  Validates the flag against
   peer_flag_action_list, enforces peer-group rules (a member may not
   set member-restricted flags, nor unset a flag its group has) and
   the STRICT_CAP_MATCH/OVERRIDE_CAPABILITY conflict, then applies the
   change.  For a group template the change is propagated to every
   member.  SET is 1 to set, 0 to unset.  Returns 0 or a BGP_ERR_*
   code. */
static int
peer_flag_modify (struct peer *peer, u_int32_t flag, int set)
{
  int found;
  int size;
  struct peer_group *group;
  struct listnode *node, *nnode;
  struct peer_flag_action action;

  memset (&action, 0, sizeof (struct peer_flag_action));
  size = sizeof peer_flag_action_list / sizeof (struct peer_flag_action);

  found = peer_flag_action_set (peer_flag_action_list, size, &action, flag);

  /* No flag action is found.  */
  if (! found)
    return BGP_ERR_INVALID_FLAG;

  /* Not for peer-group member.  */
  if (action.not_for_member && peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* When unset the peer-group member's flag we have to check
     peer-group configuration.  */
  if (! set && peer_group_active (peer))
    if (CHECK_FLAG (peer->group->conf->flags, flag))
      {
        if (flag == PEER_FLAG_SHUTDOWN)
          return BGP_ERR_PEER_GROUP_SHUTDOWN;
        else
          return BGP_ERR_PEER_GROUP_HAS_THE_FLAG;
      }

  /* Flag conflict check.  */
  if (set
      && CHECK_FLAG (peer->flags | flag, PEER_FLAG_STRICT_CAP_MATCH)
      && CHECK_FLAG (peer->flags | flag, PEER_FLAG_OVERRIDE_CAPABILITY))
    return BGP_ERR_PEER_FLAG_CONFLICT;

  /* Skip when the requested state is already in effect (real peers
     only; group templates always fall through to member updates). */
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (set && CHECK_FLAG (peer->flags, flag) == flag)
        return 0;
      if (! set && ! CHECK_FLAG (peer->flags, flag))
        return 0;
    }

  if (set)
    SET_FLAG (peer->flags, flag);
  else
    UNSET_FLAG (peer->flags, flag);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (action.type == peer_change_reset)
        peer_flag_modify_action (peer, flag);

      return 0;
    }

  /* peer-group member updates. */
  group = peer->group;

  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      if (set && CHECK_FLAG (peer->flags, flag) == flag)
        continue;

      if (! set && ! CHECK_FLAG (peer->flags, flag))
        continue;

      if (set)
        SET_FLAG (peer->flags, flag);
      else
        UNSET_FLAG (peer->flags, flag);

      if (action.type == peer_change_reset)
        peer_flag_modify_action (peer, flag);
    }
  return 0;
}
/* Set a session-wide peer flag; wrapper around peer_flag_modify(). */
int
peer_flag_set (struct peer *peer, u_int32_t flag)
{
  return peer_flag_modify (peer, flag, 1);
}
/* Unset a session-wide peer flag; wrapper around peer_flag_modify(). */
int
peer_flag_unset (struct peer *peer, u_int32_t flag)
{
  return peer_flag_modify (peer, flag, 0);
}
/* Return 1 when the peer is a peer-group member in the given
   AFI/SAFI, 0 otherwise. */
static int
peer_is_group_member (struct peer *peer, afi_t afi, safi_t safi)
{
  return peer->af_group[afi][safi] ? 1 : 0;
}
/* Change a per-address-family peer flag.  Validates the flag against
   peer_af_flag_action_list, checks that the AF is activated, enforces
   peer-group restrictions plus the reflector-client (iBGP only) and
   remove-private-AS (eBGP only) constraints, then applies the change
   and the corresponding session action.  For a group template the
   change is propagated to every member bound in this AF.  SET is 1 to
   set, 0 to unset.  Returns 0 or a BGP_ERR_* code. */
static int
peer_af_flag_modify (struct peer *peer, afi_t afi, safi_t safi, u_int32_t flag,
                     int set)
{
  int found;
  int size;
  struct listnode *node, *nnode;
  struct peer_group *group;
  struct peer_flag_action action;

  memset (&action, 0, sizeof (struct peer_flag_action));
  size = sizeof peer_af_flag_action_list / sizeof (struct peer_flag_action);

  found = peer_flag_action_set (peer_af_flag_action_list, size, &action, flag);

  /* No flag action is found.  */
  if (! found)
    return BGP_ERR_INVALID_FLAG;

  /* Adress family must be activated.  */
  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  /* Not for peer-group member.  */
  if (action.not_for_member && peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

 /* Spcecial check for reflector client.  */
  if (flag & PEER_FLAG_REFLECTOR_CLIENT
      && peer_sort (peer) != BGP_PEER_IBGP)
    return BGP_ERR_NOT_INTERNAL_PEER;

  /* Spcecial check for remove-private-AS.  */
  if (flag & PEER_FLAG_REMOVE_PRIVATE_AS
      && peer_sort (peer) == BGP_PEER_IBGP)
    return BGP_ERR_REMOVE_PRIVATE_AS;

  /* When unset the peer-group member's flag we have to check
     peer-group configuration.  */
  if (! set && peer->af_group[afi][safi])
    if (CHECK_FLAG (peer->group->conf->af_flags[afi][safi], flag))
      return BGP_ERR_PEER_GROUP_HAS_THE_FLAG;

  /* When current flag configuration is same as requested one.  */
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (set && CHECK_FLAG (peer->af_flags[afi][safi], flag) == flag)
        return 0;
      if (! set && ! CHECK_FLAG (peer->af_flags[afi][safi], flag))
        return 0;
    }

  if (set)
    SET_FLAG (peer->af_flags[afi][safi], flag);
  else
    UNSET_FLAG (peer->af_flags[afi][safi], flag);

  /* Execute action when peer is established.  */
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)
      && peer->status == Established)
    {
      if (! set && flag == PEER_FLAG_SOFT_RECONFIG)
        bgp_clear_adj_in (peer, afi, safi);
      else
        {
          /* Record the reset cause for `show` output. */
          if (flag == PEER_FLAG_REFLECTOR_CLIENT)
            peer->last_reset = PEER_DOWN_RR_CLIENT_CHANGE;
          else if (flag == PEER_FLAG_RSERVER_CLIENT)
            peer->last_reset = PEER_DOWN_RS_CLIENT_CHANGE;
          else if (flag == PEER_FLAG_ORF_PREFIX_SM)
            peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
          else if (flag == PEER_FLAG_ORF_PREFIX_RM)
            peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;

          peer_change_action (peer, afi, safi, action.type);
        }
    }

  /* Peer group member updates.  */
  if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      group = peer->group;

      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
        {
          if (! peer->af_group[afi][safi])
            continue;

          if (set && CHECK_FLAG (peer->af_flags[afi][safi], flag) == flag)
            continue;

          if (! set && ! CHECK_FLAG (peer->af_flags[afi][safi], flag))
            continue;

          if (set)
            SET_FLAG (peer->af_flags[afi][safi], flag);
          else
            UNSET_FLAG (peer->af_flags[afi][safi], flag);

          if (peer->status == Established)
            {
              if (! set && flag == PEER_FLAG_SOFT_RECONFIG)
                bgp_clear_adj_in (peer, afi, safi);
              else
                {
                  if (flag == PEER_FLAG_REFLECTOR_CLIENT)
                    peer->last_reset = PEER_DOWN_RR_CLIENT_CHANGE;
                  else if (flag == PEER_FLAG_RSERVER_CLIENT)
                    peer->last_reset = PEER_DOWN_RS_CLIENT_CHANGE;
                  else if (flag == PEER_FLAG_ORF_PREFIX_SM)
                    peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
                  else if (flag == PEER_FLAG_ORF_PREFIX_RM)
                    peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;

                  peer_change_action (peer, afi, safi, action.type);
                }
            }
        }
    }
  return 0;
}
/* Set a per-AF peer flag; wrapper around peer_af_flag_modify(). */
int
peer_af_flag_set (struct peer *peer, afi_t afi, safi_t safi, u_int32_t flag)
{
  return peer_af_flag_modify (peer, afi, safi, flag, 1);
}
/* Unset a per-AF peer flag; wrapper around peer_af_flag_modify(). */
int
peer_af_flag_unset (struct peer *peer, afi_t afi, safi_t safi, u_int32_t flag)
{
  return peer_af_flag_modify (peer, afi, safi, flag, 0);
}
/* EBGP multihop configuration: set the session TTL for an eBGP peer
   (no-op for iBGP).  Rejected when GTSM (ttl-security hops) is
   configured on the peer, or on the group/any member for a group
   template, unless TTL is MAXTTL.  For a group template the TTL is
   pushed to every eBGP member and re-applied to open sockets.
   Returns 0 or a BGP_ERR_* code. */
int
peer_ebgp_multihop_set (struct peer *peer, int ttl)
{
  struct peer_group *group;
  struct listnode *node, *nnode;
  struct peer *peer1;

  if (peer_sort (peer) == BGP_PEER_IBGP)
    return 0;

  /* see comment in peer_ttl_security_hops_set() */
  if (ttl != MAXTTL)
    {
      if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
        {
          group = peer->group;
          if (group->conf->gtsm_hops != 0)
            return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;

          for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer1))
            {
              if (peer_sort (peer1) == BGP_PEER_IBGP)
                continue;

              if (peer1->gtsm_hops != 0)
                return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;
            }
        }
      else
        {
          if (peer->gtsm_hops != 0)
            return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;
        }
    }

  peer->ttl = ttl;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->fd >= 0 && peer_sort (peer) != BGP_PEER_IBGP)
        sockopt_ttl (peer->su.sa.sa_family, peer->fd, peer->ttl);
    }
  else
    {
      group = peer->group;
      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
        {
          if (peer_sort (peer) == BGP_PEER_IBGP)
            continue;

          peer->ttl = group->conf->ttl;

          if (peer->fd >= 0)
            sockopt_ttl (peer->su.sa.sa_family, peer->fd, peer->ttl);
        }
    }
  return 0;
}
/* Remove eBGP multihop configuration, restoring TTL to the group's
 * configured value (if the peer inherits from an active group) or to
 * the eBGP default of 1.  A no-op for iBGP peers; refused while a
 * conflicting GTSM configuration is present. */
int
peer_ebgp_multihop_unset (struct peer *peer)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (peer_sort (peer) == BGP_PEER_IBGP)
    return 0;

  if (peer->gtsm_hops != 0 && peer->ttl != MAXTTL)
    return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;

  if (peer_group_active (peer))
    peer->ttl = peer->group->conf->ttl;
  else
    peer->ttl = 1;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Real peer: reflect the restored TTL on the live socket. */
      if (peer->fd >= 0 && peer_sort (peer) != BGP_PEER_IBGP)
        sockopt_ttl (peer->su.sa.sa_family, peer->fd, peer->ttl);
    }
  else
    {
      /* Peer-group: reset each eBGP member to TTL 1.
         NOTE: "peer" is reused as the loop cursor here. */
      group = peer->group;
      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
        {
          if (peer_sort (peer) == BGP_PEER_IBGP)
            continue;

          peer->ttl = 1;

          if (peer->fd >= 0)
            sockopt_ttl (peer->su.sa.sa_family, peer->fd, peer->ttl);
        }
    }
  return 0;
}
/* Neighbor description. */
int
peer_description_set (struct peer *peer, char *desc)
{
if (peer->desc)
XFREE (MTYPE_PEER_DESC, peer->desc);
peer->desc = XSTRDUP (MTYPE_PEER_DESC, desc);
return 0;
}
/* Remove the neighbor description, freeing the stored copy. */
int
peer_description_unset (struct peer *peer)
{
  if (peer->desc)
    XFREE (MTYPE_PEER_DESC, peer->desc);

  peer->desc = NULL;

  return 0;
}
/* Neighbor update-source. */
/* Neighbor update-source, interface form: bind the session source to
 * interface IFNAME.  The interface form supersedes any address-form
 * update-source.  Changing the source bounces the session (CEASE
 * notification if Established, otherwise a BGP_Stop event); for a
 * peer-group the change is pushed to every member. */
int
peer_update_source_if_set (struct peer *peer, const char *ifname)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (peer->update_if)
    {
      /* Same interface already set on a real peer: nothing to do. */
      if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)
          && strcmp (peer->update_if, ifname) == 0)
        return 0;

      XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
      peer->update_if = NULL;
    }

  /* Interface form replaces any configured source address. */
  if (peer->update_source)
    {
      sockunion_free (peer->update_source);
      peer->update_source = NULL;
    }

  peer->update_if = XSTRDUP (MTYPE_PEER_UPDATE_SOURCE, ifname);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Real peer: restart the session so the new source is used. */
      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      if (peer->update_if)
        {
          /* Member already uses this interface: leave it untouched. */
          if (strcmp (peer->update_if, ifname) == 0)
            continue;

          XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
          peer->update_if = NULL;
        }

      if (peer->update_source)
        {
          sockunion_free (peer->update_source);
          peer->update_source = NULL;
        }

      peer->update_if = XSTRDUP (MTYPE_PEER_UPDATE_SOURCE, ifname);

      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
    }
  return 0;
}
/* Neighbor update-source, address form: bind the session source to
 * address SU.  The address form supersedes any interface-form
 * update-source.  Changing the source bounces the session; for a
 * peer-group the change is pushed to every member. */
int
peer_update_source_addr_set (struct peer *peer, union sockunion *su)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (peer->update_source)
    {
      /* Same source address already set on a real peer: no-op. */
      if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)
          && sockunion_cmp (peer->update_source, su) == 0)
        return 0;

      sockunion_free (peer->update_source);
      peer->update_source = NULL;
    }

  /* Address form replaces any configured source interface. */
  if (peer->update_if)
    {
      XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
      peer->update_if = NULL;
    }

  peer->update_source = sockunion_dup (su);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Real peer: restart the session so the new source is used. */
      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      if (peer->update_source)
        {
          /* Member already uses this address: leave it untouched. */
          if (sockunion_cmp (peer->update_source, su) == 0)
            continue;

          sockunion_free (peer->update_source);
          peer->update_source = NULL;
        }

      if (peer->update_if)
        {
          XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
          peer->update_if = NULL;
        }

      peer->update_source = sockunion_dup (su);

      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
    }
  return 0;
}
/* Remove the neighbor update-source (both address and interface
 * forms).  A peer inheriting from an active group falls back to the
 * group's configured update-source.  The session is bounced so the
 * change takes effect; group config clears every member. */
int
peer_update_source_unset (struct peer *peer)
{
  union sockunion *su;
  struct peer_group *group;
  struct listnode *node, *nnode;

  /* Real peer with nothing configured: nothing to do. */
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP)
      && ! peer->update_source
      && ! peer->update_if)
    return 0;

  if (peer->update_source)
    {
      sockunion_free (peer->update_source);
      peer->update_source = NULL;
    }
  if (peer->update_if)
    {
      XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
      peer->update_if = NULL;
    }

  /* Group member: re-inherit the group's update-source, if any. */
  if (peer_group_active (peer))
    {
      group = peer->group;

      if (group->conf->update_source)
        {
          su = sockunion_dup (group->conf->update_source);
          peer->update_source = su;
        }
      else if (group->conf->update_if)
        peer->update_if =
          XSTRDUP (MTYPE_PEER_UPDATE_SOURCE, group->conf->update_if);
    }

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Real peer: restart the session with the restored source. */
      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      /* Skip members that have no update-source configured. */
      if (! peer->update_source && ! peer->update_if)
        continue;

      if (peer->update_source)
        {
          sockunion_free (peer->update_source);
          peer->update_source = NULL;
        }

      if (peer->update_if)
        {
          XFREE (MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
          peer->update_if = NULL;
        }

      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
    }
  return 0;
}
/* Enable default-originate for (afi, safi), optionally attached to
 * route-map RMAP (may be NULL).  Requires the address family to be
 * activated; not allowed on a peer-group member.  If the session is
 * Established and the AF was negotiated, a default route is
 * advertised immediately; group config is pushed to all members. */
int
peer_default_originate_set (struct peer *peer, afi_t afi, safi_t safi,
                            const char *rmap)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  /* Adress family must be activated. */
  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  /* Default originate can't be used for peer group memeber. */
  if (peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* Apply only when the flag is newly set or the route-map changed. */
  if (! CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE)
      || (rmap && ! peer->default_rmap[afi][safi].name)
      || (rmap && strcmp (rmap, peer->default_rmap[afi][safi].name) != 0))
    {
      SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE);

      if (rmap)
        {
          if (peer->default_rmap[afi][safi].name)
            free (peer->default_rmap[afi][safi].name);
          peer->default_rmap[afi][safi].name = strdup (rmap);
          peer->default_rmap[afi][safi].map = route_map_lookup_by_name (rmap);
        }
    }

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->status == Established && peer->afc_nego[afi][safi])
        bgp_default_originate (peer, afi, safi, 0);
      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE);

      if (rmap)
        {
          if (peer->default_rmap[afi][safi].name)
            free (peer->default_rmap[afi][safi].name);
          peer->default_rmap[afi][safi].name = strdup (rmap);
          peer->default_rmap[afi][safi].map = route_map_lookup_by_name (rmap);
        }

      if (peer->status == Established && peer->afc_nego[afi][safi])
        bgp_default_originate (peer, afi, safi, 0);
    }
  return 0;
}
/* Disable default-originate for (afi, safi), dropping any attached
 * route-map.  Requires the address family to be activated; not
 * allowed on a peer-group member.  If the session is Established and
 * the AF was negotiated, the default route is withdrawn (last arg 1);
 * group config is pushed to all members. */
int
peer_default_originate_unset (struct peer *peer, afi_t afi, safi_t safi)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  /* Adress family must be activated. */
  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  /* Default originate can't be used for peer group memeber. */
  if (peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE))
    {
      UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE);

      if (peer->default_rmap[afi][safi].name)
        free (peer->default_rmap[afi][safi].name);
      peer->default_rmap[afi][safi].name = NULL;
      peer->default_rmap[afi][safi].map = NULL;
    }

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->status == Established && peer->afc_nego[afi][safi])
        bgp_default_originate (peer, afi, safi, 1);
      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE);

      if (peer->default_rmap[afi][safi].name)
        free (peer->default_rmap[afi][safi].name);
      peer->default_rmap[afi][safi].name = NULL;
      peer->default_rmap[afi][safi].map = NULL;

      if (peer->status == Established && peer->afc_nego[afi][safi])
        bgp_default_originate (peer, afi, safi, 1);
    }
  return 0;
}
/* Record the configured remote TCP port for the BGP session. */
int
peer_port_set (struct peer *peer, u_int16_t port)
{
  peer->port = port;

  return 0;
}
/* Restore the remote TCP port to the well-known BGP default. */
int
peer_port_unset (struct peer *peer)
{
  peer->port = BGP_PORT_DEFAULT;

  return 0;
}
/* neighbor weight. */
int
peer_weight_set (struct peer *peer, u_int16_t weight)
{
struct peer_group *group;
struct listnode *node, *nnode;
SET_FLAG (peer->config, PEER_CONFIG_WEIGHT);
peer->weight = weight;
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
return 0;
/* peer-group member updates. */
group = peer->group;
for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
{
peer->weight = group->conf->weight;
}
return 0;
}
/* Remove the explicit neighbor weight.  A peer inheriting from an
 * active group falls back to the group's weight; otherwise 0. */
int
peer_weight_unset (struct peer *peer)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  /* Set default weight. */
  if (peer_group_active (peer))
    peer->weight = peer->group->conf->weight;
  else
    peer->weight = 0;

  UNSET_FLAG (peer->config, PEER_CONFIG_WEIGHT);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor).
     NOTE(review): members are reset to 0 here rather than to
     group->conf->weight — confirm this asymmetry is intended. */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      peer->weight = 0;
    }
  return 0;
}
/* Configure keepalive/holdtime for a peer or peer-group.  Rejected on
 * a group member (timers come from the group), for values > 65535,
 * or for a holdtime of 1 or 2 (must be 0 or >= 3, per RFC 4271).
 * The stored keepalive is clamped to holdtime/3. */
int
peer_timers_set (struct peer *peer, u_int32_t keepalive, u_int32_t holdtime)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  /* Not for peer group memeber. */
  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* keepalive value check. */
  if (keepalive > 65535)
    return BGP_ERR_INVALID_VALUE;

  /* Holdtime value check. */
  if (holdtime > 65535)
    return BGP_ERR_INVALID_VALUE;

  /* Holdtime value must be either 0 or greater than 3. */
  if (holdtime < 3 && holdtime != 0)
    return BGP_ERR_INVALID_VALUE;

  /* Set value to the configuration. */
  SET_FLAG (peer->config, PEER_CONFIG_TIMER);
  peer->holdtime = holdtime;
  /* Never keep alive more often than a third of the holdtime. */
  peer->keepalive = (keepalive < holdtime / 3 ? keepalive : holdtime / 3);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor):
     members take the group's (already clamped) timer values. */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      SET_FLAG (peer->config, PEER_CONFIG_TIMER);
      peer->holdtime = group->conf->holdtime;
      peer->keepalive = group->conf->keepalive;
    }
  return 0;
}
/* Remove explicit keepalive/holdtime configuration, reverting both to
 * 0 (i.e. "use defaults").  Rejected on a peer-group member; group
 * config clears all members as well. */
int
peer_timers_unset (struct peer *peer)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* Clear configuration. */
  UNSET_FLAG (peer->config, PEER_CONFIG_TIMER);
  peer->keepalive = 0;
  peer->holdtime = 0;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      UNSET_FLAG (peer->config, PEER_CONFIG_TIMER);
      peer->holdtime = 0;
      peer->keepalive = 0;
    }
  return 0;
}
/* Configure the connect-retry timer.  Rejected on a peer-group
 * member and for values above 65535; updates both the saved
 * configuration and the running timer value. */
int
peer_timers_connect_set (struct peer *peer, u_int32_t connect)
{
  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  if (connect > 65535)
    return BGP_ERR_INVALID_VALUE;

  /* Remember the configured value... */
  SET_FLAG (peer->config, PEER_CONFIG_CONNECT);
  peer->connect = connect;

  /* ...and apply it to the live timer. */
  peer->v_connect = connect;

  return 0;
}
/* Remove the connect-retry timer configuration and restore the
 * running timer to its compiled-in default. */
int
peer_timers_connect_unset (struct peer *peer)
{
  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* Forget the configured value... */
  UNSET_FLAG (peer->config, PEER_CONFIG_CONNECT);
  peer->connect = 0;

  /* ...and fall back to the default retry interval. */
  peer->v_connect = BGP_DEFAULT_CONNECT_RETRY;

  return 0;
}
/* Configure the minimum route advertisement interval.  Rejected on a
 * peer-group member and for values above 600 seconds; updates both
 * the saved configuration and the running timer value. */
int
peer_advertise_interval_set (struct peer *peer, u_int32_t routeadv)
{
  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  if (routeadv > 600)
    return BGP_ERR_INVALID_VALUE;

  SET_FLAG (peer->config, PEER_CONFIG_ROUTEADV);
  peer->routeadv = routeadv;
  peer->v_routeadv = routeadv;

  return 0;
}
/* Remove the advertisement-interval configuration, restoring the
 * running timer to the iBGP or eBGP default as appropriate. */
int
peer_advertise_interval_unset (struct peer *peer)
{
  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  UNSET_FLAG (peer->config, PEER_CONFIG_ROUTEADV);
  peer->routeadv = 0;

  /* Default depends on the peering type. */
  peer->v_routeadv = (peer_sort (peer) == BGP_PEER_IBGP)
    ? BGP_DEFAULT_IBGP_ROUTEADV
    : BGP_DEFAULT_EBGP_ROUTEADV;

  return 0;
}
/* neighbor interface */
int
peer_interface_set (struct peer *peer, const char *str)
{
if (peer->ifname)
free (peer->ifname);
peer->ifname = strdup (str);
return 0;
}
/* Remove the interface name associated with this neighbor, freeing
 * the stored copy. */
int
peer_interface_unset (struct peer *peer)
{
  /* free(NULL) is a no-op, so no guard is needed here. */
  free (peer->ifname);
  peer->ifname = NULL;

  return 0;
}
/* Allow-as in. */
int
peer_allowas_in_set (struct peer *peer, afi_t afi, safi_t safi, int allow_num)
{
struct peer_group *group;
struct listnode *node, *nnode;
if (allow_num < 1 || allow_num > 10)
return BGP_ERR_INVALID_VALUE;
if (peer->allowas_in[afi][safi] != allow_num)
{
peer->allowas_in[afi][safi] = allow_num;
SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN);
peer_change_action (peer, afi, safi, peer_change_reset_in);
}
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
return 0;
group = peer->group;
for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
{
if (peer->allowas_in[afi][safi] != allow_num)
{
peer->allowas_in[afi][safi] = allow_num;
SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN);
peer_change_action (peer, afi, safi, peer_change_reset_in);
}
}
return 0;
}
/* Disable allow-as-in for (afi, safi), resetting the count to 0 and
 * clearing the flag via peer_af_flag_unset (which also handles the
 * resulting session updates).  Group config clears all members. */
int
peer_allowas_in_unset (struct peer *peer, afi_t afi, safi_t safi)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN))
    {
      peer->allowas_in[afi][safi] = 0;
      peer_af_flag_unset (peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
    }

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN))
        {
          peer->allowas_in[afi][safi] = 0;
          peer_af_flag_unset (peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
        }
    }
  return 0;
}
/* Configure "local-as AS [no-prepend]" on a peer or peer-group:
 * present a different AS number to this neighbor.  Only valid for
 * eBGP (or BGP_PEER_INTERNAL) peers, and AS must differ from the
 * instance's own AS.  Changing it bounces the session; group config
 * is pushed to every member. */
int
peer_local_as_set (struct peer *peer, as_t as, int no_prepend)
{
  struct bgp *bgp = peer->bgp;
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (peer_sort (peer) != BGP_PEER_EBGP
      && peer_sort (peer) != BGP_PEER_INTERNAL)
    return BGP_ERR_LOCAL_AS_ALLOWED_ONLY_FOR_EBGP;

  if (bgp->as == as)
    return BGP_ERR_CANNOT_HAVE_LOCAL_AS_SAME_AS;

  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* No-op when both the AS and the no-prepend setting are unchanged. */
  if (peer->change_local_as == as &&
      ((CHECK_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND) && no_prepend)
       || (! CHECK_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND) && ! no_prepend)))
    return 0;

  peer->change_local_as = as;
  if (no_prepend)
    SET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);
  else
    UNSET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Real peer: restart the session under the new local AS. */
      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);

      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      peer->change_local_as = as;
      if (no_prepend)
        SET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);
      else
        UNSET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);

      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
    }
  return 0;
}
/* Remove "local-as" configuration, reverting to the instance's own
 * AS.  Rejected on a peer-group member; a no-op if nothing was
 * configured.  The session is bounced; group config clears members. */
int
peer_local_as_unset (struct peer *peer)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (peer_group_active (peer))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  if (! peer->change_local_as)
    return 0;

  peer->change_local_as = 0;
  UNSET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Real peer: restart the session under the real local AS. */
      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);

      return 0;
    }

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      peer->change_local_as = 0;
      UNSET_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND);

      if (peer->status == Established)
        {
          peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE;
          bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                           BGP_NOTIFY_CEASE_CONFIG_CHANGE);
        }
      else
        BGP_EVENT_ADD (peer, BGP_Stop);
    }
  return 0;
}
/* Set password for authenticating with the peer. */
int
peer_password_set (struct peer *peer, const char *password)
{
struct listnode *nn, *nnode;
int len = password ? strlen(password) : 0;
int ret = BGP_SUCCESS;
if ((len < PEER_PASSWORD_MINLEN) || (len > PEER_PASSWORD_MAXLEN))
return BGP_ERR_INVALID_VALUE;
if (peer->password && strcmp (peer->password, password) == 0
&& ! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
return 0;
if (peer->password)
XFREE (MTYPE_PEER_PASSWORD, peer->password);
peer->password = XSTRDUP (MTYPE_PEER_PASSWORD, password);
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
{
if (peer->status == Established)
bgp_notify_send (peer, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE);
else
BGP_EVENT_ADD (peer, BGP_Stop);
return (bgp_md5_set (peer) >= 0) ? BGP_SUCCESS : BGP_ERR_TCPSIG_FAILED;
}
for (ALL_LIST_ELEMENTS (peer->group->peer, nn, nnode, peer))
{
if (peer->password && strcmp (peer->password, password) == 0)
continue;
if (peer->password)
XFREE (MTYPE_PEER_PASSWORD, peer->password);
peer->password = XSTRDUP(MTYPE_PEER_PASSWORD, password);
if (peer->status == Established)
bgp_notify_send (peer, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE);
else
BGP_EVENT_ADD (peer, BGP_Stop);
if (bgp_md5_set (peer) < 0)
ret = BGP_ERR_TCPSIG_FAILED;
}
return ret;
}
/* Remove the peer's MD5 password.  A member cannot clear a password
 * inherited from its group (BGP_ERR_PEER_GROUP_HAS_THE_FLAG).  The
 * session is bounced and the kernel MD5 key cleared via
 * bgp_md5_set(); group config clears every member that still has a
 * password of its own. */
int
peer_password_unset (struct peer *peer)
{
  struct listnode *nn, *nnode;

  /* Real peer with no password: nothing to do. */
  if (!peer->password
      && !CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  if (!CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Refuse to override a password inherited from the group. */
      if (peer_group_active (peer)
          && peer->group->conf->password
          && strcmp (peer->group->conf->password, peer->password) == 0)
        return BGP_ERR_PEER_GROUP_HAS_THE_FLAG;

      if (peer->status == Established)
        bgp_notify_send (peer, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE);
      else
        BGP_EVENT_ADD (peer, BGP_Stop);

      if (peer->password)
        XFREE (MTYPE_PEER_PASSWORD, peer->password);

      peer->password = NULL;

      bgp_md5_set (peer);

      return 0;
    }

  XFREE (MTYPE_PEER_PASSWORD, peer->password);
  peer->password = NULL;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  for (ALL_LIST_ELEMENTS (peer->group->peer, nn, nnode, peer))
    {
      if (!peer->password)
        continue;

      if (peer->status == Established)
        bgp_notify_send (peer, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE);
      else
        BGP_EVENT_ADD (peer, BGP_Stop);

      XFREE (MTYPE_PEER_PASSWORD, peer->password);
      peer->password = NULL;

      bgp_md5_set (peer);
    }

  return 0;
}
/* Set distribute list to the peer. */
int
peer_distribute_set (struct peer *peer, afi_t afi, safi_t safi, int direct,
const char *name)
{
struct bgp_filter *filter;
struct peer_group *group;
struct listnode *node, *nnode;
if (! peer->afc[afi][safi])
return BGP_ERR_PEER_INACTIVE;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
if (direct == FILTER_OUT && peer_is_group_member (peer, afi, safi))
return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;
filter = &peer->filter[afi][safi];
if (filter->plist[direct].name)
return BGP_ERR_PEER_FILTER_CONFLICT;
if (filter->dlist[direct].name)
free (filter->dlist[direct].name);
filter->dlist[direct].name = strdup (name);
filter->dlist[direct].alist = access_list_lookup (afi, name);
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
return 0;
group = peer->group;
for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
{
filter = &peer->filter[afi][safi];
if (! peer->af_group[afi][safi])
continue;
if (filter->dlist[direct].name)
free (filter->dlist[direct].name);
filter->dlist[direct].name = strdup (name);
filter->dlist[direct].alist = access_list_lookup (afi, name);
}
return 0;
}
/* Remove the distribute list from the peer for (afi, safi)/DIRECT.
 * A peer bound to a group in this AF re-inherits the group's
 * distribute list instead of clearing it.  Group config clears all
 * AF-bound members. */
int
peer_distribute_unset (struct peer *peer, afi_t afi, safi_t safi, int direct)
{
  struct bgp_filter *filter;
  struct bgp_filter *gfilter;
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  if (direct != FILTER_IN && direct != FILTER_OUT)
    return BGP_ERR_INVALID_VALUE;

  if (direct == FILTER_OUT && peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  filter = &peer->filter[afi][safi];

  /* apply peer-group filter */
  if (peer->af_group[afi][safi])
    {
      gfilter = &peer->group->conf->filter[afi][safi];

      if (gfilter->dlist[direct].name)
        {
          if (filter->dlist[direct].name)
            free (filter->dlist[direct].name);
          filter->dlist[direct].name = strdup (gfilter->dlist[direct].name);
          filter->dlist[direct].alist = gfilter->dlist[direct].alist;
          return 0;
        }
    }

  if (filter->dlist[direct].name)
    free (filter->dlist[direct].name);
  filter->dlist[direct].name = NULL;
  filter->dlist[direct].alist = NULL;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      filter = &peer->filter[afi][safi];

      if (! peer->af_group[afi][safi])
        continue;

      if (filter->dlist[direct].name)
        free (filter->dlist[direct].name);
      filter->dlist[direct].name = NULL;
      filter->dlist[direct].alist = NULL;
    }

  return 0;
}
/* Update distribute list. */
static void
peer_distribute_update (struct access_list *access)
{
afi_t afi;
safi_t safi;
int direct;
struct listnode *mnode, *mnnode;
struct listnode *node, *nnode;
struct bgp *bgp;
struct peer *peer;
struct peer_group *group;
struct bgp_filter *filter;
for (ALL_LIST_ELEMENTS (bm->bgp, mnode, mnnode, bgp))
{
for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
{
for (afi = AFI_IP; afi < AFI_MAX; afi++)
for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
{
filter = &peer->filter[afi][safi];
for (direct = FILTER_IN; direct < FILTER_MAX; direct++)
{
if (filter->dlist[direct].name)
filter->dlist[direct].alist =
access_list_lookup (afi, filter->dlist[direct].name);
else
filter->dlist[direct].alist = NULL;
}
}
}
for (ALL_LIST_ELEMENTS (bgp->group, node, nnode, group))
{
for (afi = AFI_IP; afi < AFI_MAX; afi++)
for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
{
filter = &group->conf->filter[afi][safi];
for (direct = FILTER_IN; direct < FILTER_MAX; direct++)
{
if (filter->dlist[direct].name)
filter->dlist[direct].alist =
access_list_lookup (afi, filter->dlist[direct].name);
else
filter->dlist[direct].alist = NULL;
}
}
}
}
}
/* Set prefix list to the peer. */
int
peer_prefix_list_set (struct peer *peer, afi_t afi, safi_t safi, int direct,
const char *name)
{
struct bgp_filter *filter;
struct peer_group *group;
struct listnode *node, *nnode;
if (! peer->afc[afi][safi])
return BGP_ERR_PEER_INACTIVE;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
if (direct == FILTER_OUT && peer_is_group_member (peer, afi, safi))
return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;
filter = &peer->filter[afi][safi];
if (filter->dlist[direct].name)
return BGP_ERR_PEER_FILTER_CONFLICT;
if (filter->plist[direct].name)
free (filter->plist[direct].name);
filter->plist[direct].name = strdup (name);
filter->plist[direct].plist = prefix_list_lookup (afi, name);
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
return 0;
group = peer->group;
for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
{
filter = &peer->filter[afi][safi];
if (! peer->af_group[afi][safi])
continue;
if (filter->plist[direct].name)
free (filter->plist[direct].name);
filter->plist[direct].name = strdup (name);
filter->plist[direct].plist = prefix_list_lookup (afi, name);
}
return 0;
}
/* Remove the prefix list from the peer for (afi, safi)/DIRECT.  A
 * peer bound to a group in this AF re-inherits the group's prefix
 * list instead of clearing it; group config clears AF-bound members. */
int
peer_prefix_list_unset (struct peer *peer, afi_t afi, safi_t safi, int direct)
{
  struct bgp_filter *filter;
  struct bgp_filter *gfilter;
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  if (direct != FILTER_IN && direct != FILTER_OUT)
    return BGP_ERR_INVALID_VALUE;

  if (direct == FILTER_OUT && peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  filter = &peer->filter[afi][safi];

  /* apply peer-group filter */
  if (peer->af_group[afi][safi])
    {
      gfilter = &peer->group->conf->filter[afi][safi];

      if (gfilter->plist[direct].name)
        {
          if (filter->plist[direct].name)
            free (filter->plist[direct].name);
          filter->plist[direct].name = strdup (gfilter->plist[direct].name);
          filter->plist[direct].plist = gfilter->plist[direct].plist;
          return 0;
        }
    }

  if (filter->plist[direct].name)
    free (filter->plist[direct].name);
  filter->plist[direct].name = NULL;
  filter->plist[direct].plist = NULL;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      filter = &peer->filter[afi][safi];

      if (! peer->af_group[afi][safi])
        continue;

      if (filter->plist[direct].name)
        free (filter->plist[direct].name);
      filter->plist[direct].name = NULL;
      filter->plist[direct].plist = NULL;
    }

  return 0;
}
/* Update prefix-list list. */
/* Update prefix-list list: callback invoked when prefix-lists change.
 * Re-resolves every cached name -> prefix_list pointer for all peers
 * and peer-groups of all BGP instances, in every AF/SAFI/direction.
 * The PLIST argument identifies the changed list but is unused. */
static void
peer_prefix_list_update (struct prefix_list *plist)
{
  struct listnode *mnode, *mnnode;
  struct listnode *node, *nnode;
  struct bgp *bgp;
  struct peer *peer;
  struct peer_group *group;
  struct bgp_filter *filter;
  afi_t afi;
  safi_t safi;
  int direct;

  for (ALL_LIST_ELEMENTS (bm->bgp, mnode, mnnode, bgp))
    {
      /* Refresh each peer's cached plist pointers. */
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
        {
          for (afi = AFI_IP; afi < AFI_MAX; afi++)
            for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
              {
                filter = &peer->filter[afi][safi];

                for (direct = FILTER_IN; direct < FILTER_MAX; direct++)
                  {
                    if (filter->plist[direct].name)
                      filter->plist[direct].plist =
                        prefix_list_lookup (afi, filter->plist[direct].name);
                    else
                      filter->plist[direct].plist = NULL;
                  }
              }
        }
      /* And the same for each peer-group's configuration. */
      for (ALL_LIST_ELEMENTS (bgp->group, node, nnode, group))
        {
          for (afi = AFI_IP; afi < AFI_MAX; afi++)
            for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
              {
                filter = &group->conf->filter[afi][safi];

                for (direct = FILTER_IN; direct < FILTER_MAX; direct++)
                  {
                    if (filter->plist[direct].name)
                      filter->plist[direct].plist =
                        prefix_list_lookup (afi, filter->plist[direct].name);
                    else
                      filter->plist[direct].plist = NULL;
                  }
              }
        }
    }
}
/* Set AS-path filter list NAME on the peer for (afi, safi)/DIRECT.
 * Unlike distribute/prefix lists there is no conflict check here.
 * Outbound not allowed on a group member; group config is pushed to
 * AF-bound members. */
int
peer_aslist_set (struct peer *peer, afi_t afi, safi_t safi, int direct,
                 const char *name)
{
  struct bgp_filter *filter;
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  if (direct != FILTER_IN && direct != FILTER_OUT)
    return BGP_ERR_INVALID_VALUE;

  if (direct == FILTER_OUT && peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  filter = &peer->filter[afi][safi];

  if (filter->aslist[direct].name)
    free (filter->aslist[direct].name);
  filter->aslist[direct].name = strdup (name);
  /* Cached aslist pointer; re-resolved by peer_aslist_update(). */
  filter->aslist[direct].aslist = as_list_lookup (name);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      filter = &peer->filter[afi][safi];

      if (! peer->af_group[afi][safi])
        continue;

      if (filter->aslist[direct].name)
        free (filter->aslist[direct].name);
      filter->aslist[direct].name = strdup (name);
      filter->aslist[direct].aslist = as_list_lookup (name);
    }
  return 0;
}
/* Remove the AS-path filter list from the peer for (afi, safi)/DIRECT.
 * A peer bound to a group in this AF re-inherits the group's list
 * instead of clearing it; group config clears AF-bound members. */
int
peer_aslist_unset (struct peer *peer,afi_t afi, safi_t safi, int direct)
{
  struct bgp_filter *filter;
  struct bgp_filter *gfilter;
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  if (direct != FILTER_IN && direct != FILTER_OUT)
    return BGP_ERR_INVALID_VALUE;

  if (direct == FILTER_OUT && peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  filter = &peer->filter[afi][safi];

  /* apply peer-group filter */
  if (peer->af_group[afi][safi])
    {
      gfilter = &peer->group->conf->filter[afi][safi];

      if (gfilter->aslist[direct].name)
        {
          if (filter->aslist[direct].name)
            free (filter->aslist[direct].name);
          filter->aslist[direct].name = strdup (gfilter->aslist[direct].name);
          filter->aslist[direct].aslist = gfilter->aslist[direct].aslist;
          return 0;
        }
    }

  if (filter->aslist[direct].name)
    free (filter->aslist[direct].name);
  filter->aslist[direct].name = NULL;
  filter->aslist[direct].aslist = NULL;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      filter = &peer->filter[afi][safi];

      if (! peer->af_group[afi][safi])
        continue;

      if (filter->aslist[direct].name)
        free (filter->aslist[direct].name);
      filter->aslist[direct].name = NULL;
      filter->aslist[direct].aslist = NULL;
    }

  return 0;
}
/* Callback invoked when AS-path access-lists change.  Re-resolves
 * every cached name -> as_list pointer for all peers and peer-groups
 * of all BGP instances, in every AF/SAFI/direction. */
static void
peer_aslist_update (void)
{
  afi_t afi;
  safi_t safi;
  int direct;
  struct listnode *mnode, *mnnode;
  struct listnode *node, *nnode;
  struct bgp *bgp;
  struct peer *peer;
  struct peer_group *group;
  struct bgp_filter *filter;

  for (ALL_LIST_ELEMENTS (bm->bgp, mnode, mnnode, bgp))
    {
      /* Refresh each peer's cached aslist pointers. */
      for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
        {
          for (afi = AFI_IP; afi < AFI_MAX; afi++)
            for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
              {
                filter = &peer->filter[afi][safi];

                for (direct = FILTER_IN; direct < FILTER_MAX; direct++)
                  {
                    if (filter->aslist[direct].name)
                      filter->aslist[direct].aslist =
                        as_list_lookup (filter->aslist[direct].name);
                    else
                      filter->aslist[direct].aslist = NULL;
                  }
              }
        }
      /* And the same for each peer-group's configuration. */
      for (ALL_LIST_ELEMENTS (bgp->group, node, nnode, group))
        {
          for (afi = AFI_IP; afi < AFI_MAX; afi++)
            for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++)
              {
                filter = &group->conf->filter[afi][safi];

                for (direct = FILTER_IN; direct < FILTER_MAX; direct++)
                  {
                    if (filter->aslist[direct].name)
                      filter->aslist[direct].aslist =
                        as_list_lookup (filter->aslist[direct].name);
                    else
                      filter->aslist[direct].aslist = NULL;
                  }
              }
        }
    }
}
/* Set route-map to the peer. */
/* Set route-map to the peer: attach route-map NAME for (afi, safi)
 * in DIRECT (RMAP_IN/OUT/IMPORT/EXPORT).  Outbound and import maps
 * cannot be set on a peer-group member; group config is pushed to
 * AF-bound members. */
int
peer_route_map_set (struct peer *peer, afi_t afi, safi_t safi, int direct,
                    const char *name)
{
  struct bgp_filter *filter;
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  if (direct != RMAP_IN && direct != RMAP_OUT &&
      direct != RMAP_IMPORT && direct != RMAP_EXPORT)
    return BGP_ERR_INVALID_VALUE;

  if ( (direct == RMAP_OUT || direct == RMAP_IMPORT)
      && peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  filter = &peer->filter[afi][safi];

  if (filter->map[direct].name)
    free (filter->map[direct].name);

  filter->map[direct].name = strdup (name);
  /* Cached map pointer; may be NULL if the route-map isn't defined. */
  filter->map[direct].map = route_map_lookup_by_name (name);

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* peer-group member updates ("peer" becomes the loop cursor). */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      filter = &peer->filter[afi][safi];

      if (! peer->af_group[afi][safi])
        continue;

      if (filter->map[direct].name)
        free (filter->map[direct].name);
      filter->map[direct].name = strdup (name);
      filter->map[direct].map = route_map_lookup_by_name (name);
    }
  return 0;
}
/* Unset route-map from the peer. */
int
peer_route_map_unset (struct peer *peer, afi_t afi, safi_t safi, int direct)
{
struct bgp_filter *filter;
struct bgp_filter *gfilter;
struct peer_group *group;
struct listnode *node, *nnode;
if (! peer->afc[afi][safi])
return BGP_ERR_PEER_INACTIVE;
if (direct != RMAP_IN && direct != RMAP_OUT &&
direct != RMAP_IMPORT && direct != RMAP_EXPORT)
return BGP_ERR_INVALID_VALUE;
if ( (direct == RMAP_OUT || direct == RMAP_IMPORT)
&& peer_is_group_member (peer, afi, safi))
return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;
filter = &peer->filter[afi][safi];
/* apply peer-group filter */
if (peer->af_group[afi][safi])
{
gfilter = &peer->group->conf->filter[afi][safi];
if (gfilter->map[direct].name)
{
if (filter->map[direct].name)
free (filter->map[direct].name);
filter->map[direct].name = strdup (gfilter->map[direct].name);
filter->map[direct].map = gfilter->map[direct].map;
return 0;
}
}
if (filter->map[direct].name)
free (filter->map[direct].name);
filter->map[direct].name = NULL;
filter->map[direct].map = NULL;
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
return 0;
group = peer->group;
for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
{
filter = &peer->filter[afi][safi];
if (! peer->af_group[afi][safi])
continue;
if (filter->map[direct].name)
free (filter->map[direct].name);
filter->map[direct].name = NULL;
filter->map[direct].map = NULL;
}
return 0;
}
/* Set unsuppress-map to the peer. */
/* Installs unsuppress-map NAME on PEER for afi/safi, propagating to all
   peer-group members when PEER is a group.  Returns 0 or a BGP_ERR_*
   code. */
int
peer_unsuppress_map_set (struct peer *peer, afi_t afi, safi_t safi,
			 const char *name)
{
  struct bgp_filter *f;
  struct peer_group *grp;
  struct listnode *ln, *lnext;

  if (!peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  /* Individual peer-group members inherit this from the group. */
  if (peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* Replace any current unsuppress-map; free(NULL) is harmless. */
  f = &peer->filter[afi][safi];
  free (f->usmap.name);
  f->usmap.name = strdup (name);
  f->usmap.map = route_map_lookup_by_name (name);

  if (!CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* Push the same map to every member bound in this address family. */
  grp = peer->group;
  for (ALL_LIST_ELEMENTS (grp->peer, ln, lnext, peer))
    {
      if (!peer->af_group[afi][safi])
	continue;
      f = &peer->filter[afi][safi];
      free (f->usmap.name);
      f->usmap.name = strdup (name);
      f->usmap.map = route_map_lookup_by_name (name);
    }
  return 0;
}
/* Unset route-map from the peer. */
/* Clears the unsuppress-map on PEER for afi/safi; for a peer-group the
   map is also cleared on every bound member.  Returns 0 or a BGP_ERR_*
   code. */
int
peer_unsuppress_map_unset (struct peer *peer, afi_t afi, safi_t safi)
{
  struct bgp_filter *f;
  struct peer_group *grp;
  struct listnode *ln, *lnext;

  if (!peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  /* Members inherit from the group; unset must happen on the group. */
  if (peer_is_group_member (peer, afi, safi))
    return BGP_ERR_INVALID_FOR_PEER_GROUP_MEMBER;

  /* Drop this peer's map; free(NULL) is a no-op. */
  f = &peer->filter[afi][safi];
  free (f->usmap.name);
  f->usmap.name = NULL;
  f->usmap.map = NULL;

  if (!CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* Clear the map on every bound peer-group member as well. */
  grp = peer->group;
  for (ALL_LIST_ELEMENTS (grp->peer, ln, lnext, peer))
    {
      if (!peer->af_group[afi][safi])
	continue;
      f = &peer->filter[afi][safi];
      free (f->usmap.name);
      f->usmap.name = NULL;
      f->usmap.map = NULL;
    }
  return 0;
}
/* Apply the maximum-prefix settings to a single peer in one afi/safi.
   Factored out so the direct-peer path and the group-member loop stay
   in sync. */
static void
peer_maximum_prefix_apply (struct peer *p, afi_t afi, safi_t safi,
			   u_int32_t max, u_char threshold,
			   int warning, u_int16_t restart)
{
  SET_FLAG (p->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
  p->pmax[afi][safi] = max;
  p->pmax_threshold[afi][safi] = threshold;
  p->pmax_restart[afi][safi] = restart;
  if (warning)
    SET_FLAG (p->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
  else
    UNSET_FLAG (p->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
}

/* Configure maximum-prefix on PEER (and, for a peer-group, on each
   member bound in this afi/safi).  Returns 0 or BGP_ERR_PEER_INACTIVE. */
int
peer_maximum_prefix_set (struct peer *peer, afi_t afi, safi_t safi,
			 u_int32_t max, u_char threshold,
			 int warning, u_int16_t restart)
{
  struct peer_group *grp;
  struct listnode *ln, *lnext;

  if (!peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  peer_maximum_prefix_apply (peer, afi, safi, max, threshold, warning,
			     restart);

  /* A plain peer is finished; a peer-group also updates its members. */
  if (!CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  grp = peer->group;
  for (ALL_LIST_ELEMENTS (grp->peer, ln, lnext, peer))
    {
      if (!peer->af_group[afi][safi])
	continue;
      peer_maximum_prefix_apply (peer, afi, safi, max, threshold, warning,
				 restart);
    }
  return 0;
}
/* Remove the maximum-prefix configuration from PEER for afi/safi.
   A peer-group member reverts to the group's settings instead of
   clearing them; a peer-group zeroes the settings on all bound members.
   Returns 0 or BGP_ERR_PEER_INACTIVE. */
int
peer_maximum_prefix_unset (struct peer *peer, afi_t afi, safi_t safi)
{
  struct peer_group *group;
  struct listnode *node, *nnode;

  if (! peer->afc[afi][safi])
    return BGP_ERR_PEER_INACTIVE;

  /* apply peer-group config */
  /* A bound member copies the group's flags and limits and stops here. */
  if (peer->af_group[afi][safi])
    {
      if (CHECK_FLAG (peer->group->conf->af_flags[afi][safi],
	  PEER_FLAG_MAX_PREFIX))
	SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
      else
	UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);

      if (CHECK_FLAG (peer->group->conf->af_flags[afi][safi],
	  PEER_FLAG_MAX_PREFIX_WARNING))
	SET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
      else
	UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);

      peer->pmax[afi][safi] = peer->group->conf->pmax[afi][safi];
      peer->pmax_threshold[afi][safi] = peer->group->conf->pmax_threshold[afi][safi];
      peer->pmax_restart[afi][safi] = peer->group->conf->pmax_restart[afi][safi];
      return 0;
    }

  /* Not group-bound: clear the limit on this peer entirely. */
  UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
  UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
  peer->pmax[afi][safi] = 0;
  peer->pmax_threshold[afi][safi] = 0;
  peer->pmax_restart[afi][safi] = 0;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    return 0;

  /* Peer-group: clear the limit on every bound member too. */
  group = peer->group;
  for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
    {
      if (! peer->af_group[afi][safi])
	continue;

      UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
      UNSET_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
      peer->pmax[afi][safi] = 0;
      peer->pmax_threshold[afi][safi] = 0;
      peer->pmax_restart[afi][safi] = 0;
    }
  return 0;
}
/* Set # of hops between us and BGP peer. */
/* Configure GTSM (RFC 5082 style min-TTL) for PEER.  Refuses iBGP peers
   and any peer (or peer-group member) that already has ebgp-multihop
   configured, since the two features program the TTL in conflicting
   ways.  Returns 0 or a BGP_ERR_* code. */
int
peer_ttl_security_hops_set (struct peer *peer, int gtsm_hops)
{
  struct peer_group *group;
  struct listnode *node, *nnode;
  struct peer *peer1;
  int ret;

  zlog_debug ("peer_ttl_security_hops_set: set gtsm_hops to %d for %s", gtsm_hops, peer->host);

  if (peer_sort (peer) == BGP_PEER_IBGP)
    return BGP_ERR_NO_IBGP_WITH_TTLHACK;

  /* We cannot configure ttl-security hops when ebgp-multihop is already
     set.  For non peer-groups, the check is simple.  For peer-groups, it's
     slightly messy, because we need to check both the peer-group structure
     and all peer-group members for any trace of ebgp-multihop configuration
     before actually applying the ttl-security rules.  Cisco really made a
     mess of this configuration parameter, and OpenBGPD got it right.
  */

  /* gtsm_hops == 0 means GTSM was not previously enabled here, so the
     ebgp-multihop conflict check and the MAXTTL setup run only once. */
  if (peer->gtsm_hops == 0) {
    if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
      {
	group = peer->group;
	if (group->conf->ttl != 1)
	  return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;

	for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer1))
	  {
	    if (peer_sort (peer1) == BGP_PEER_IBGP)
	      continue;

	    if (peer1->ttl != 1)
	      return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;
	  }
      }
    else
      {
	if (peer->ttl != 1)
	  return BGP_ERR_NO_EBGP_MULTIHOP_WITH_TTLHACK;
      }

    /* specify MAXTTL on outgoing packets */
    ret = peer_ebgp_multihop_set (peer, MAXTTL);
    if (ret != 0)
      return ret;
  }

  peer->gtsm_hops = gtsm_hops;

  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      /* Apply the minimum-TTL socket option to the live session, if any. */
      if (peer->fd >= 0 && peer_sort (peer) != BGP_PEER_IBGP)
	sockopt_minttl (peer->su.sa.sa_family, peer->fd, MAXTTL + 1 - gtsm_hops);
    }
  else
    {
      /* Peer-group: copy the group's hop count into each eBGP member and
	 update (or restart) their sessions.  Note the loop rebinds `peer'. */
      group = peer->group;
      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
	{
	  if (peer_sort (peer) == BGP_PEER_IBGP)
	    continue;

	  peer->gtsm_hops = group->conf->gtsm_hops;

	  /* Change setting of existing peer
	   *   established then change value (may break connectivity)
	   *   not established yet (teardown session and restart)
	   *   no session then do nothing (will get handled by next connection)
	   */
	  if (peer->status == Established)
	    {
	      if (peer->fd >= 0 && peer->gtsm_hops != 0)
		sockopt_minttl (peer->su.sa.sa_family, peer->fd,
				MAXTTL + 1 - peer->gtsm_hops);
	    }
	  else if (peer->status < Established)
	    {
	      if (BGP_DEBUG (events, EVENTS))
		zlog_debug ("%s Min-ttl changed", peer->host);
	      BGP_EVENT_ADD (peer, BGP_Stop);
	    }
	}
    }
  return 0;
}
/* Remove the GTSM (ttl-security) configuration from PEER, resetting the
   min-TTL socket option on live sessions and finally undoing the
   MAXTTL ebgp-multihop setting that peer_ttl_security_hops_set put in
   place.  Returns the result of peer_ebgp_multihop_unset(). */
int
peer_ttl_security_hops_unset (struct peer *peer)
{
  struct peer_group *group;
  struct listnode *node, *nnode;
  struct peer *opeer;

  zlog_debug ("peer_ttl_security_hops_unset: set gtsm_hops to zero for %s", peer->host);

  /* GTSM is never applied to iBGP peers, so there is nothing to undo. */
  if (peer_sort (peer) == BGP_PEER_IBGP)
    return 0;

  /* if a peer-group member, then reset to peer-group default rather than 0 */
  if (peer_group_active (peer))
    peer->gtsm_hops = peer->group->conf->gtsm_hops;
  else
    peer->gtsm_hops = 0;

  /* The loop below rebinds `peer'; keep the original for the final call. */
  opeer = peer;
  if (! CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
    {
      if (peer->fd >= 0 && peer_sort (peer) != BGP_PEER_IBGP)
	sockopt_minttl (peer->su.sa.sa_family, peer->fd, 0);
    }
  else
    {
      /* Peer-group: clear GTSM on every eBGP member's live session. */
      group = peer->group;
      for (ALL_LIST_ELEMENTS (group->peer, node, nnode, peer))
	{
	  if (peer_sort (peer) == BGP_PEER_IBGP)
	    continue;

	  peer->gtsm_hops = 0;

	  if (peer->fd >= 0)
	    sockopt_minttl (peer->su.sa.sa_family, peer->fd, 0);
	}
    }

  return peer_ebgp_multihop_unset (opeer);
}
/* Hard-clear PEER: restart a session that was shut down by a
   maximum-prefix overflow, or otherwise reset the session (with an
   administrative-reset NOTIFY when it is established).  Peers that are
   administratively shut down are left alone.  Always returns 0. */
int
peer_clear (struct peer *peer)
{
  /* Nothing to do for an administratively shut-down peer. */
  if (CHECK_FLAG (peer->flags, PEER_FLAG_SHUTDOWN))
    return 0;

  /* A peer stopped by maximum-prefix overflow is simply restarted. */
  if (CHECK_FLAG (peer->sflags, PEER_STATUS_PREFIX_OVERFLOW))
    {
      UNSET_FLAG (peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);

      if (peer->t_pmax_restart)
	{
	  BGP_TIMER_OFF (peer->t_pmax_restart);
	  if (BGP_DEBUG (events, EVENTS))
	    zlog_debug ("%s Maximum-prefix restart timer canceled",
			peer->host);
	}

      BGP_EVENT_ADD (peer, BGP_Start);
      return 0;
    }

  /* Otherwise reset the session from scratch. */
  peer->v_start = BGP_INIT_START_TIMER;
  if (peer->status == Established)
    bgp_notify_send (peer, BGP_NOTIFY_CEASE,
		     BGP_NOTIFY_CEASE_ADMIN_RESET);
  else
    BGP_EVENT_ADD (peer, BGP_Stop);

  return 0;
}
/* Soft-clear PEER in afi/safi according to STYPE: re-announce outbound
   routes, re-process inbound routes (via stored Adj-RIB-In or a route
   refresh request), push ORF prefix filters, or re-run route-server
   client processing.  Returns 0 or a BGP_ERR_* code. */
int
peer_clear_soft (struct peer *peer, afi_t afi, safi_t safi,
		 enum bgp_clear_type stype)
{
  /* Only meaningful on an established session. */
  if (peer->status != Established)
    return 0;

  if (! peer->afc[afi][safi])
    return BGP_ERR_AF_UNCONFIGURED;

  if (stype == BGP_CLEAR_SOFT_RSCLIENT)
    {
      if (! CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_RSERVER_CLIENT))
	return 0;
      bgp_check_local_routes_rsclient (peer, afi, safi);
      bgp_soft_reconfig_rsclient (peer, afi, safi);
    }

  if (stype == BGP_CLEAR_SOFT_OUT || stype == BGP_CLEAR_SOFT_BOTH)
    bgp_announce_route (peer, afi, safi);

  if (stype == BGP_CLEAR_SOFT_IN_ORF_PREFIX)
    {
      /* Push our inbound prefix-list to the peer as an ORF, provided both
	 sides negotiated the capability (new or old encoding). */
      if (CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_ADV)
	  && (CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_RCV)
	      || CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_OLD_RCV)))
	{
	  struct bgp_filter *filter = &peer->filter[afi][safi];
	  u_char prefix_type;

	  if (CHECK_FLAG (peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_RCV))
	    prefix_type = ORF_TYPE_PREFIX;
	  else
	    prefix_type = ORF_TYPE_PREFIX_OLD;

	  if (filter->plist[FILTER_IN].plist)
	    {
	      /* A previously sent ORF must be deferred-removed before the
		 replacement is sent with immediate effect. */
	      if (CHECK_FLAG (peer->af_sflags[afi][safi], PEER_STATUS_ORF_PREFIX_SEND))
		bgp_route_refresh_send (peer, afi, safi,
					prefix_type, REFRESH_DEFER, 1);
	      bgp_route_refresh_send (peer, afi, safi, prefix_type,
				      REFRESH_IMMEDIATE, 0);
	    }
	  else
	    {
	      /* No inbound prefix-list: remove any previously sent ORF,
		 or fall back to a plain route refresh. */
	      if (CHECK_FLAG (peer->af_sflags[afi][safi], PEER_STATUS_ORF_PREFIX_SEND))
		bgp_route_refresh_send (peer, afi, safi,
					prefix_type, REFRESH_IMMEDIATE, 1);
	      else
		bgp_route_refresh_send (peer, afi, safi, 0, 0, 0);
	    }
	  return 0;
	}
    }

  if (stype == BGP_CLEAR_SOFT_IN || stype == BGP_CLEAR_SOFT_BOTH
      || stype == BGP_CLEAR_SOFT_IN_ORF_PREFIX)
    {
      /* If neighbor has soft reconfiguration inbound flag.
	 Use Adj-RIB-In database. */
      if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG))
	bgp_soft_reconfig_in (peer, afi, safi);
      else
	{
	  /* If neighbor has route refresh capability, send route refresh
	     message to the peer. */
	  if (CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_OLD_RCV)
	      || CHECK_FLAG (peer->cap, PEER_CAP_REFRESH_NEW_RCV))
	    bgp_route_refresh_send (peer, afi, safi, 0, 0, 0);
	  else
	    return BGP_ERR_SOFT_RECONFIG_UNCONFIGURED;
	}
    }
  return 0;
}
/* Display peer uptime.*/
/* XXX: why does this function return char * when it takes buffer? */
/* Format the elapsed time since UPTIME2 into BUF (at least
   BGP_UPTIME_LEN bytes) as hh:mm:ss, DdHHhMMm or WWwDdHHh, and return
   BUF.  UPTIME2 == 0 means the session never came up.
   NOTE(review): the elapsed seconds are decomposed with gmtime(), whose
   tm_yday field cannot exceed a year — uptimes longer than one year
   presumably display wrapped; confirm before relying on the output.
   gmtime() also returns a shared static buffer (not thread-safe). */
char *
peer_uptime (time_t uptime2, char *buf, size_t len)
{
  time_t uptime1;
  struct tm *tm;

  /* Check buffer length. */
  if (len < BGP_UPTIME_LEN)
    {
      zlog_warn ("peer_uptime (): buffer shortage %lu", (u_long)len);
      /* XXX: should return status instead of buf... */
      snprintf (buf, len, "<error> ");
      return buf;
    }

  /* If there is no connection has been done before print `never'. */
  if (uptime2 == 0)
    {
      snprintf (buf, len, "never ");
      return buf;
    }

  /* Get current time. */
  uptime1 = bgp_clock ();
  uptime1 -= uptime2;
  tm = gmtime (&uptime1);

  /* Making formatted timer strings. */
  /* Parenthesized so the macros expand safely in any expression context
     (the old unparenthesized `60*60*24' would misparse next to a
     higher-precedence operator). */
#define ONE_DAY_SECOND (60*60*24)
#define ONE_WEEK_SECOND (60*60*24*7)

  if (uptime1 < ONE_DAY_SECOND)
    snprintf (buf, len, "%02d:%02d:%02d",
	      tm->tm_hour, tm->tm_min, tm->tm_sec);
  else if (uptime1 < ONE_WEEK_SECOND)
    snprintf (buf, len, "%dd%02dh%02dm",
	      tm->tm_yday, tm->tm_hour, tm->tm_min);
  else
    snprintf (buf, len, "%02dw%dd%02dh",
	      tm->tm_yday/7, tm->tm_yday - ((tm->tm_yday/7) * 7), tm->tm_hour);
  return buf;
}
/* Write the per-afi/safi filter configuration (distribute-list,
   prefix-list, route-map, unsuppress-map, filter-list) for PEER to VTY.
   Lines whose value matches the active peer-group's configuration are
   suppressed; outbound/import/unsuppress entries are never written for
   group members (gfilter != NULL) since those live on the group. */
static void
bgp_config_write_filter (struct vty *vty, struct peer *peer,
			 afi_t afi, safi_t safi)
{
  struct bgp_filter *filter;
  struct bgp_filter *gfilter = NULL;
  char *addr;
  int in = FILTER_IN;
  int out = FILTER_OUT;

  addr = peer->host;
  filter = &peer->filter[afi][safi];
  if (peer->af_group[afi][safi])
    gfilter = &peer->group->conf->filter[afi][safi];

  /* distribute-list. */
  if (filter->dlist[in].name)
    if (! gfilter || ! gfilter->dlist[in].name
	|| strcmp (filter->dlist[in].name, gfilter->dlist[in].name) != 0)
      vty_out (vty, " neighbor %s distribute-list %s in%s", addr,
	       filter->dlist[in].name, VTY_NEWLINE);
  if (filter->dlist[out].name && ! gfilter)
    vty_out (vty, " neighbor %s distribute-list %s out%s", addr,
	     filter->dlist[out].name, VTY_NEWLINE);

  /* prefix-list. */
  if (filter->plist[in].name)
    if (! gfilter || ! gfilter->plist[in].name
	|| strcmp (filter->plist[in].name, gfilter->plist[in].name) != 0)
      vty_out (vty, " neighbor %s prefix-list %s in%s", addr,
	       filter->plist[in].name, VTY_NEWLINE);
  if (filter->plist[out].name && ! gfilter)
    vty_out (vty, " neighbor %s prefix-list %s out%s", addr,
	     filter->plist[out].name, VTY_NEWLINE);

  /* route-map. */
  if (filter->map[RMAP_IN].name)
    if (! gfilter || ! gfilter->map[RMAP_IN].name
	|| strcmp (filter->map[RMAP_IN].name, gfilter->map[RMAP_IN].name) != 0)
      vty_out (vty, " neighbor %s route-map %s in%s", addr,
	       filter->map[RMAP_IN].name, VTY_NEWLINE);
  if (filter->map[RMAP_OUT].name && ! gfilter)
    vty_out (vty, " neighbor %s route-map %s out%s", addr,
	     filter->map[RMAP_OUT].name, VTY_NEWLINE);
  if (filter->map[RMAP_IMPORT].name && ! gfilter)
    vty_out (vty, " neighbor %s route-map %s import%s", addr,
	     filter->map[RMAP_IMPORT].name, VTY_NEWLINE);
  /* Export route-map IS compared against the group, unlike out/import. */
  if (filter->map[RMAP_EXPORT].name)
    if (! gfilter || ! gfilter->map[RMAP_EXPORT].name
	|| strcmp (filter->map[RMAP_EXPORT].name,
		   gfilter->map[RMAP_EXPORT].name) != 0)
      vty_out (vty, " neighbor %s route-map %s export%s", addr,
	       filter->map[RMAP_EXPORT].name, VTY_NEWLINE);

  /* unsuppress-map */
  if (filter->usmap.name && ! gfilter)
    vty_out (vty, " neighbor %s unsuppress-map %s%s", addr,
	     filter->usmap.name, VTY_NEWLINE);

  /* filter-list. */
  if (filter->aslist[in].name)
    if (! gfilter || ! gfilter->aslist[in].name
	|| strcmp (filter->aslist[in].name, gfilter->aslist[in].name) != 0)
      vty_out (vty, " neighbor %s filter-list %s in%s", addr,
	       filter->aslist[in].name, VTY_NEWLINE);
  if (filter->aslist[out].name && ! gfilter)
    vty_out (vty, " neighbor %s filter-list %s out%s", addr,
	     filter->aslist[out].name, VTY_NEWLINE);
}
/* BGP peer configuration display function. */
/* Emit the running-config `neighbor ...' lines for PEER (which may be a
   peer-group conf) in the given afi/safi.  Settings merely inherited
   from an active peer-group are suppressed so the written config stays
   minimal.  Global (session-level) options are written only in the
   IPv4-unicast pass; per-AF options are written in every pass. */
static void
bgp_config_write_peer (struct vty *vty, struct bgp *bgp,
		       struct peer *peer, afi_t afi, safi_t safi)
{
  struct peer *g_peer = NULL;
  char buf[SU_ADDRSTRLEN];
  char *addr;

  addr = peer->host;
  if (peer_group_active (peer))
    g_peer = peer->group->conf;

  /************************************
   ****** Global to the neighbor ******
   ************************************/
  if (afi == AFI_IP && safi == SAFI_UNICAST)
    {
      /* remote-as. */
      if (! peer_group_active (peer))
	{
	  if (CHECK_FLAG (peer->sflags, PEER_STATUS_GROUP))
	    vty_out (vty, " neighbor %s peer-group%s", addr,
		     VTY_NEWLINE);
	  if (peer->as)
	    vty_out (vty, " neighbor %s remote-as %u%s", addr, peer->as,
		     VTY_NEWLINE);
	}
      else
	{
	  if (! g_peer->as)
	    vty_out (vty, " neighbor %s remote-as %u%s", addr, peer->as,
		     VTY_NEWLINE);
	  if (peer->af_group[AFI_IP][SAFI_UNICAST])
	    vty_out (vty, " neighbor %s peer-group %s%s", addr,
		     peer->group->name, VTY_NEWLINE);
	}

      /* local-as. */
      if (peer->change_local_as)
	if (! peer_group_active (peer))
	  vty_out (vty, " neighbor %s local-as %u%s%s", addr,
		   peer->change_local_as,
		   CHECK_FLAG (peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND) ?
		   " no-prepend" : "", VTY_NEWLINE);

      /* Description. */
      if (peer->desc)
	vty_out (vty, " neighbor %s description %s%s", addr, peer->desc,
		 VTY_NEWLINE);

      /* Shutdown. */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_SHUTDOWN))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_SHUTDOWN))
	  vty_out (vty, " neighbor %s shutdown%s", addr, VTY_NEWLINE);

      /* Password. */
      if (peer->password)
	if (!peer_group_active (peer)
	    || ! g_peer->password
	    || strcmp (peer->password, g_peer->password) != 0)
	  vty_out (vty, " neighbor %s password %s%s", addr, peer->password,
		   VTY_NEWLINE);

      /* BGP port. */
      if (peer->port != BGP_PORT_DEFAULT)
	vty_out (vty, " neighbor %s port %d%s", addr, peer->port,
		 VTY_NEWLINE);

      /* Local interface name. */
      if (peer->ifname)
	vty_out (vty, " neighbor %s interface %s%s", addr, peer->ifname,
		 VTY_NEWLINE);

      /* Passive. */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_PASSIVE))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_PASSIVE))
	  vty_out (vty, " neighbor %s passive%s", addr, VTY_NEWLINE);

      /* EBGP multihop. */
      /* Suppressed when ttl==MAXTTL came from ttl-security (GTSM). */
      if (peer_sort (peer) != BGP_PEER_IBGP && peer->ttl != 1 &&
	  !(peer->gtsm_hops != 0 && peer->ttl == MAXTTL))
	if (! peer_group_active (peer) ||
	    g_peer->ttl != peer->ttl)
	  vty_out (vty, " neighbor %s ebgp-multihop %d%s", addr, peer->ttl,
		   VTY_NEWLINE);

      /* ttl-security hops */
      if (peer_sort (peer) != BGP_PEER_IBGP && peer->gtsm_hops != 0)
	if (! peer_group_active (peer) || g_peer->gtsm_hops != peer->gtsm_hops)
	  vty_out (vty, " neighbor %s ttl-security hops %d%s", addr,
		   peer->gtsm_hops, VTY_NEWLINE);

      /* disable-connected-check. */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK))
	  vty_out (vty, " neighbor %s disable-connected-check%s", addr, VTY_NEWLINE);

      /* Update-source. */
      if (peer->update_if)
	if (! peer_group_active (peer) || ! g_peer->update_if
	    || strcmp (g_peer->update_if, peer->update_if) != 0)
	  vty_out (vty, " neighbor %s update-source %s%s", addr,
		   peer->update_if, VTY_NEWLINE);
      if (peer->update_source)
	if (! peer_group_active (peer) || ! g_peer->update_source
	    || sockunion_cmp (g_peer->update_source,
			      peer->update_source) != 0)
	  vty_out (vty, " neighbor %s update-source %s%s", addr,
		   sockunion2str (peer->update_source, buf, SU_ADDRSTRLEN),
		   VTY_NEWLINE);

      /* advertisement-interval */
      if (CHECK_FLAG (peer->config, PEER_CONFIG_ROUTEADV))
	vty_out (vty, " neighbor %s advertisement-interval %d%s",
		 addr, peer->v_routeadv, VTY_NEWLINE);

      /* timers. */
      if (CHECK_FLAG (peer->config, PEER_CONFIG_TIMER)
	  && ! peer_group_active (peer))
	vty_out (vty, " neighbor %s timers %d %d%s", addr,
		 peer->keepalive, peer->holdtime, VTY_NEWLINE);

      if (CHECK_FLAG (peer->config, PEER_CONFIG_CONNECT))
	vty_out (vty, " neighbor %s timers connect %d%s", addr,
		 peer->connect, VTY_NEWLINE);

      /* Default weight. */
      if (CHECK_FLAG (peer->config, PEER_CONFIG_WEIGHT))
	if (! peer_group_active (peer) ||
	    g_peer->weight != peer->weight)
	  vty_out (vty, " neighbor %s weight %d%s", addr, peer->weight,
		   VTY_NEWLINE);

      /* Dynamic capability.  */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_DYNAMIC_CAPABILITY))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_DYNAMIC_CAPABILITY))
	  vty_out (vty, " neighbor %s capability dynamic%s", addr,
		   VTY_NEWLINE);

      /* dont capability negotiation. */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_DONT_CAPABILITY))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_DONT_CAPABILITY))
	  vty_out (vty, " neighbor %s dont-capability-negotiate%s", addr,
		   VTY_NEWLINE);

      /* override capability negotiation. */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_OVERRIDE_CAPABILITY))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_OVERRIDE_CAPABILITY))
	  vty_out (vty, " neighbor %s override-capability%s", addr,
		   VTY_NEWLINE);

      /* strict capability negotiation. */
      if (CHECK_FLAG (peer->flags, PEER_FLAG_STRICT_CAP_MATCH))
	if (! peer_group_active (peer) ||
	    ! CHECK_FLAG (g_peer->flags, PEER_FLAG_STRICT_CAP_MATCH))
	  vty_out (vty, " neighbor %s strict-capability-match%s", addr,
		   VTY_NEWLINE);

      /* IPv4-unicast activation, honouring `no bgp default ipv4-unicast'. */
      if (! peer_group_active (peer))
	{
	  if (bgp_flag_check (bgp, BGP_FLAG_NO_DEFAULT_IPV4))
	    {
	      if (peer->afc[AFI_IP][SAFI_UNICAST])
		vty_out (vty, " neighbor %s activate%s", addr, VTY_NEWLINE);
	    }
	  else
	    {
	      if (! peer->afc[AFI_IP][SAFI_UNICAST])
		vty_out (vty, " no neighbor %s activate%s", addr, VTY_NEWLINE);
	    }
	}
    }

  /************************************
   ****** Per AF to the neighbor ******
   ************************************/
  if (! (afi == AFI_IP && safi == SAFI_UNICAST))
    {
      if (peer->af_group[afi][safi])
	vty_out (vty, " neighbor %s peer-group %s%s", addr,
		 peer->group->name, VTY_NEWLINE);
      else
	vty_out (vty, " neighbor %s activate%s", addr, VTY_NEWLINE);
    }

  /* ORF capability.  */
  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ORF_PREFIX_SM)
      || CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ORF_PREFIX_RM))
    if (! peer->af_group[afi][safi])
      {
	vty_out (vty, " neighbor %s capability orf prefix-list", addr);

	if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ORF_PREFIX_SM)
	    && CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ORF_PREFIX_RM))
	  vty_out (vty, " both");
	else if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_ORF_PREFIX_SM))
	  vty_out (vty, " send");
	else
	  vty_out (vty, " receive");
	vty_out (vty, "%s", VTY_NEWLINE);
      }

  /* Route reflector client. */
  if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_REFLECTOR_CLIENT)
      && ! peer->af_group[afi][safi])
    vty_out (vty, " neighbor %s route-reflector-client%s", addr,
	     VTY_NEWLINE);

  /* Nexthop self. */
  if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_NEXTHOP_SELF)
      && ! peer->af_group[afi][safi])
    vty_out (vty, " neighbor %s next-hop-self%s", addr, VTY_NEWLINE);

  /* Remove private AS. */
  if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_REMOVE_PRIVATE_AS)
      && ! peer->af_group[afi][safi])
    vty_out (vty, " neighbor %s remove-private-AS%s",
	     addr, VTY_NEWLINE);

  /* send-community print. */
  /* Cisco config style writes positives; Quagga style writes negatives. */
  if (! peer->af_group[afi][safi])
    {
      if (bgp_option_check (BGP_OPT_CONFIG_CISCO))
	{
	  if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_COMMUNITY)
	      && peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_EXT_COMMUNITY))
	    vty_out (vty, " neighbor %s send-community both%s", addr, VTY_NEWLINE);
	  else if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_EXT_COMMUNITY))
	    vty_out (vty, " neighbor %s send-community extended%s",
		     addr, VTY_NEWLINE);
	  else if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_COMMUNITY))
	    vty_out (vty, " neighbor %s send-community%s", addr, VTY_NEWLINE);
	}
      else
	{
	  if (! peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_COMMUNITY)
	      && ! peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_EXT_COMMUNITY))
	    vty_out (vty, " no neighbor %s send-community both%s",
		     addr, VTY_NEWLINE);
	  else if (! peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_EXT_COMMUNITY))
	    vty_out (vty, " no neighbor %s send-community extended%s",
		     addr, VTY_NEWLINE);
	  else if (! peer_af_flag_check (peer, afi, safi, PEER_FLAG_SEND_COMMUNITY))
	    vty_out (vty, " no neighbor %s send-community%s",
		     addr, VTY_NEWLINE);
	}
    }

  /* Default information */
  if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_DEFAULT_ORIGINATE)
      && ! peer->af_group[afi][safi])
    {
      vty_out (vty, " neighbor %s default-originate", addr);
      if (peer->default_rmap[afi][safi].name)
	vty_out (vty, " route-map %s", peer->default_rmap[afi][safi].name);
      vty_out (vty, "%s", VTY_NEWLINE);
    }

  /* Soft reconfiguration inbound. */
  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG))
    if (! peer->af_group[afi][safi] ||
	! CHECK_FLAG (g_peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG))
      vty_out (vty, " neighbor %s soft-reconfiguration inbound%s", addr,
	       VTY_NEWLINE);

  /* maximum-prefix. */
  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX))
    if (! peer->af_group[afi][safi]
	|| g_peer->pmax[afi][safi] != peer->pmax[afi][safi]
	|| g_peer->pmax_threshold[afi][safi] != peer->pmax_threshold[afi][safi]
	|| CHECK_FLAG (g_peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING)
	   != CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING))
      {
	/* pmax is u_int32_t (see peer_maximum_prefix_set), so %u — the
	   previous %ld mismatched the promoted argument type. */
	vty_out (vty, " neighbor %s maximum-prefix %u", addr, peer->pmax[afi][safi]);
	if (peer->pmax_threshold[afi][safi] != MAXIMUM_PREFIX_THRESHOLD_DEFAULT)
	  vty_out (vty, " %d", peer->pmax_threshold[afi][safi]);
	if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING))
	  vty_out (vty, " warning-only");
	if (peer->pmax_restart[afi][safi])
	  vty_out (vty, " restart %d", peer->pmax_restart[afi][safi]);
	vty_out (vty, "%s", VTY_NEWLINE);
      }

  /* Route server client. */
  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_RSERVER_CLIENT)
      && ! peer->af_group[afi][safi])
    vty_out (vty, " neighbor %s route-server-client%s", addr, VTY_NEWLINE);

  /* Nexthop-local unchanged. */
  if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED)
      && ! peer->af_group[afi][safi])
    vty_out (vty, " neighbor %s nexthop-local unchanged%s", addr, VTY_NEWLINE);

  /* Allow AS in.  */
  if (peer_af_flag_check (peer, afi, safi, PEER_FLAG_ALLOWAS_IN))
    if (! peer_group_active (peer)
	|| ! peer_af_flag_check (g_peer, afi, safi, PEER_FLAG_ALLOWAS_IN)
	|| peer->allowas_in[afi][safi] != g_peer->allowas_in[afi][safi])
      {
	/* 3 is the implicit default count and is written without a value. */
	if (peer->allowas_in[afi][safi] == 3)
	  vty_out (vty, " neighbor %s allowas-in%s", addr, VTY_NEWLINE);
	else
	  vty_out (vty, " neighbor %s allowas-in %d%s", addr,
		   peer->allowas_in[afi][safi], VTY_NEWLINE);
      }

  /* Filter. */
  bgp_config_write_filter (vty, peer, afi, safi);

  /* attribute-unchanged. */
  if ((CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_AS_PATH_UNCHANGED)
      || CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_UNCHANGED)
      || CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MED_UNCHANGED))
      && ! peer->af_group[afi][safi])
    {
      if (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_AS_PATH_UNCHANGED)
	  && CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_UNCHANGED)
	  && CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MED_UNCHANGED))
	vty_out (vty, " neighbor %s attribute-unchanged%s", addr, VTY_NEWLINE);
      else
	vty_out (vty, " neighbor %s attribute-unchanged%s%s%s%s", addr,
		 (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_AS_PATH_UNCHANGED)) ?
		 " as-path" : "",
		 (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_UNCHANGED)) ?
		 " next-hop" : "",
		 (CHECK_FLAG (peer->af_flags[afi][safi], PEER_FLAG_MED_UNCHANGED)) ?
		 " med" : "", VTY_NEWLINE);
    }
}
/* Display "address-family" configuration header. */
/* Writes the `address-family ...' header once per section: *WRITE acts
   as a latch so repeated calls emit nothing, and the implicit IPv4
   unicast family never gets a header. */
void
bgp_config_write_family_header (struct vty *vty, afi_t afi, safi_t safi,
				int *write)
{
  /* Already written for this section, or implicit IPv4 unicast. */
  if (*write)
    return;
  if (afi == AFI_IP && safi == SAFI_UNICAST)
    return;

  vty_out (vty, "!%s address-family ", VTY_NEWLINE);

  switch (afi)
    {
    case AFI_IP:
      if (safi == SAFI_MULTICAST)
	vty_out (vty, "ipv4 multicast");
      else if (safi == SAFI_MPLS_VPN)
	vty_out (vty, "vpnv4 unicast");
      break;
    case AFI_IP6:
      vty_out (vty, "ipv6");
      if (safi == SAFI_MULTICAST)
	vty_out (vty, " multicast");
      break;
    default:
      break;
    }

  vty_out (vty, "%s", VTY_NEWLINE);

  *write = 1;
}
/* Address family based peer configuration display. */
/* Writes the whole afi/safi section: networks, redistribution,
   peer-groups, individual peers and maxpaths.  The header is emitted
   lazily on the first line that needs it; returns non-zero when
   anything was written. */
static int
bgp_config_write_family (struct vty *vty, struct bgp *bgp, afi_t afi,
			 safi_t safi)
{
  int wrote = 0;
  struct peer *p;
  struct peer_group *grp;
  struct listnode *ln, *lnext;

  bgp_config_write_network (vty, bgp, afi, safi, &wrote);
  bgp_config_write_redistribute (vty, bgp, afi, safi, &wrote);

  /* Peer-group configuration precedes member configuration. */
  for (ALL_LIST_ELEMENTS (bgp->group, ln, lnext, grp))
    if (grp->conf->afc[afi][safi])
      {
	bgp_config_write_family_header (vty, afi, safi, &wrote);
	bgp_config_write_peer (vty, bgp, grp->conf, afi, safi);
      }

  /* Individual peers, skipping those flagged PEER_STATUS_ACCEPT_PEER. */
  for (ALL_LIST_ELEMENTS (bgp->peer, ln, lnext, p))
    if (p->afc[afi][safi]
	&& ! CHECK_FLAG (p->sflags, PEER_STATUS_ACCEPT_PEER))
      {
	bgp_config_write_family_header (vty, afi, safi, &wrote);
	bgp_config_write_peer (vty, bgp, p, afi, safi);
      }

  bgp_config_write_maxpaths (vty, bgp, afi, safi, &wrote);

  if (wrote)
    vty_out (vty, " exit-address-family%s", VTY_NEWLINE);

  return wrote;
}
int
bgp_config_write (struct vty *vty)
{
int write = 0;
struct bgp *bgp;
struct peer_group *group;
struct peer *peer;
struct listnode *node, *nnode;
struct listnode *mnode, *mnnode;
/* BGP Multiple instance. */
if (bgp_option_check (BGP_OPT_MULTIPLE_INSTANCE))
{
vty_out (vty, "bgp multiple-instance%s", VTY_NEWLINE);
write++;
}
/* BGP Config type. */
if (bgp_option_check (BGP_OPT_CONFIG_CISCO))
{
vty_out (vty, "bgp config-type cisco%s", VTY_NEWLINE);
write++;
}
/* BGP configuration. */
for (ALL_LIST_ELEMENTS (bm->bgp, mnode, mnnode, bgp))
{
if (write)
vty_out (vty, "!%s", VTY_NEWLINE);
/* Router bgp ASN */
vty_out (vty, "router bgp %u", bgp->as);
if (bgp_option_check (BGP_OPT_MULTIPLE_INSTANCE))
{
if (bgp->name)
vty_out (vty, " view %s", bgp->name);
}
vty_out (vty, "%s", VTY_NEWLINE);
/* No Synchronization */
if (bgp_option_check (BGP_OPT_CONFIG_CISCO))
vty_out (vty, " no synchronization%s", VTY_NEWLINE);
/* BGP fast-external-failover. */
if (CHECK_FLAG (bgp->flags, BGP_FLAG_NO_FAST_EXT_FAILOVER))
vty_out (vty, " no bgp fast-external-failover%s", VTY_NEWLINE);
/* BGP router ID. */
if (CHECK_FLAG (bgp->config, BGP_CONFIG_ROUTER_ID))
vty_out (vty, " bgp router-id %s%s", inet_ntoa (bgp->router_id),
VTY_NEWLINE);
/* BGP log-neighbor-changes. */
if (bgp_flag_check (bgp, BGP_FLAG_LOG_NEIGHBOR_CHANGES))
vty_out (vty, " bgp log-neighbor-changes%s", VTY_NEWLINE);
/* BGP configuration. */
if (bgp_flag_check (bgp, BGP_FLAG_ALWAYS_COMPARE_MED))
vty_out (vty, " bgp always-compare-med%s", VTY_NEWLINE);
/* BGP default ipv4-unicast. */
if (bgp_flag_check (bgp, BGP_FLAG_NO_DEFAULT_IPV4))
vty_out (vty, " no bgp default ipv4-unicast%s", VTY_NEWLINE);
/* BGP default local-preference. */
if (bgp->default_local_pref != BGP_DEFAULT_LOCAL_PREF)
vty_out (vty, " bgp default local-preference %d%s",
bgp->default_local_pref, VTY_NEWLINE);
/* BGP client-to-client reflection. */
if (bgp_flag_check (bgp, BGP_FLAG_NO_CLIENT_TO_CLIENT))
vty_out (vty, " no bgp client-to-client reflection%s", VTY_NEWLINE);
/* BGP cluster ID. */
if (CHECK_FLAG (bgp->config, BGP_CONFIG_CLUSTER_ID))
vty_out (vty, " bgp cluster-id %s%s", inet_ntoa (bgp->cluster_id),
VTY_NEWLINE);
/* Confederation identifier*/
if (CHECK_FLAG (bgp->config, BGP_CONFIG_CONFEDERATION))
vty_out (vty, " bgp confederation identifier %i%s", bgp->confed_id,
VTY_NEWLINE);
/* Confederation peer */
if (bgp->confed_peers_cnt > 0)
{
int i;
vty_out (vty, " bgp confederation peers");
for (i = 0; i < bgp->confed_peers_cnt; i++)
vty_out(vty, " %u", bgp->confed_peers[i]);
vty_out (vty, "%s", VTY_NEWLINE);
}
/* BGP enforce-first-as. */
if (bgp_flag_check (bgp, BGP_FLAG_ENFORCE_FIRST_AS))
vty_out (vty, " bgp enforce-first-as%s", VTY_NEWLINE);
/* BGP deterministic-med. */
if (bgp_flag_check (bgp, BGP_FLAG_DETERMINISTIC_MED))
vty_out (vty, " bgp deterministic-med%s", VTY_NEWLINE);
/* BGP graceful-restart. */
if (bgp->stalepath_time != BGP_DEFAULT_STALEPATH_TIME)
vty_out (vty, " bgp graceful-restart stalepath-time %d%s",
bgp->stalepath_time, VTY_NEWLINE);
if (bgp_flag_check (bgp, BGP_FLAG_GRACEFUL_RESTART))
vty_out (vty, " bgp graceful-restart%s", VTY_NEWLINE);
/* BGP bestpath method. */
if (bgp_flag_check (bgp, BGP_FLAG_ASPATH_IGNORE))
vty_out (vty, " bgp bestpath as-path ignore%s", VTY_NEWLINE);
if (bgp_flag_check (bgp, BGP_FLAG_ASPATH_CONFED))
vty_out (vty, " bgp bestpath as-path confed%s", VTY_NEWLINE);
if (bgp_flag_check (bgp, BGP_FLAG_COMPARE_ROUTER_ID))
vty_out (vty, " bgp bestpath compare-routerid%s", VTY_NEWLINE);
if (bgp_flag_check (bgp, BGP_FLAG_MED_CONFED)
|| bgp_flag_check (bgp, BGP_FLAG_MED_MISSING_AS_WORST))
{
vty_out (vty, " bgp bestpath med");
if (bgp_flag_check (bgp, BGP_FLAG_MED_CONFED))
vty_out (vty, " confed");
if (bgp_flag_check (bgp, BGP_FLAG_MED_MISSING_AS_WORST))
vty_out (vty, " missing-as-worst");
vty_out (vty, "%s", VTY_NEWLINE);
}
/* BGP network import check. */
if (bgp_flag_check (bgp, BGP_FLAG_IMPORT_CHECK))
vty_out (vty, " bgp network import-check%s", VTY_NEWLINE);
/* BGP scan interval. */
bgp_config_write_scan_time (vty);
/* BGP flag dampening. */
if (CHECK_FLAG (bgp->af_flags[AFI_IP][SAFI_UNICAST],
BGP_CONFIG_DAMPENING))
bgp_config_write_damp (vty);
/* BGP static route configuration. */
bgp_config_write_network (vty, bgp, AFI_IP, SAFI_UNICAST, &write);
/* BGP redistribute configuration. */
bgp_config_write_redistribute (vty, bgp, AFI_IP, SAFI_UNICAST, &write);
/* BGP timers configuration. */
if (bgp->default_keepalive != BGP_DEFAULT_KEEPALIVE
&& bgp->default_holdtime != BGP_DEFAULT_HOLDTIME)
vty_out (vty, " timers bgp %d %d%s", bgp->default_keepalive,
bgp->default_holdtime, VTY_NEWLINE);
/* peer-group */
for (ALL_LIST_ELEMENTS (bgp->group, node, nnode, group))
{
bgp_config_write_peer (vty, bgp, group->conf, AFI_IP, SAFI_UNICAST);
}
/* Normal neighbor configuration. */
for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer))
{
if (! CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER))
bgp_config_write_peer (vty, bgp, peer, AFI_IP, SAFI_UNICAST);
}
/* maximum-paths */
bgp_config_write_maxpaths (vty, bgp, AFI_IP, SAFI_UNICAST, &write);
/* Distance configuration. */
bgp_config_write_distance (vty, bgp);
/* No auto-summary */
if (bgp_option_check (BGP_OPT_CONFIG_CISCO))
vty_out (vty, " no auto-summary%s", VTY_NEWLINE);
/* IPv4 multicast configuration. */
write += bgp_config_write_family (vty, bgp, AFI_IP, SAFI_MULTICAST);
/* IPv4 VPN configuration. */
write += bgp_config_write_family (vty, bgp, AFI_IP, SAFI_MPLS_VPN);
/* IPv6 unicast configuration. */
write += bgp_config_write_family (vty, bgp, AFI_IP6, SAFI_UNICAST);
/* IPv6 multicast configuration. */
write += bgp_config_write_family (vty, bgp, AFI_IP6, SAFI_MULTICAST);
write++;
}
return write;
}
void
bgp_master_init (void)
{
memset (&bgp_master, 0, sizeof (struct bgp_master));
bm = &bgp_master;
bm->bgp = list_new ();
bm->listen_sockets = list_new ();
bm->port = BGP_PORT_DEFAULT;
bm->master = thread_master_create ();
bm->start_time = bgp_clock ();
}
/* One-time initialization of the BGP daemon: install VTY commands,
   bring up every BGP subsystem, and register the list-update hooks. */
void
bgp_init (void)
{
  /* BGP VTY commands installation.  */
  bgp_vty_init ();

  /* Init kroute.  */
  bgp_kroute_init ();

  /* BGP inits.  */
  bgp_attr_init ();
  bgp_debug_init ();
  bgp_dump_init ();
  bgp_route_init ();
  bgp_route_map_init ();
  bgp_scan_init ();
  bgp_mplsvpn_init ();

  /* Access list initialize; register update hooks.  */
  access_list_init ();
  access_list_add_hook (peer_distribute_update);
  access_list_delete_hook (peer_distribute_update);

  /* Filter list initialize; register update hooks.  */
  bgp_filter_init ();
  as_list_add_hook (peer_aslist_update);
  as_list_delete_hook (peer_aslist_update);

  /* Prefix list initialize; register update hooks.  */
  prefix_list_init ();
  prefix_list_add_hook (peer_prefix_list_update);
  prefix_list_delete_hook (peer_prefix_list_update);

  /* Community list initialize.  */
  bgp_clist = community_list_init ();

#ifdef HAVE_SNMP
  bgp_snmp_init ();
#endif /* HAVE_SNMP */
}
/* Shut the daemon down: notify every established peer, flush routes,
   and release the work queues. */
void
bgp_terminate (void)
{
  struct bgp *bgp;
  struct peer *peer;
  struct listnode *pnode, *pnext;
  struct listnode *bnode, *bnext;

  /* Send a CEASE notification to each established session first. */
  for (ALL_LIST_ELEMENTS (bm->bgp, bnode, bnext, bgp))
    for (ALL_LIST_ELEMENTS (bgp->peer, pnode, pnext, peer))
      if (peer->status == Established)
        bgp_notify_send (peer, BGP_NOTIFY_CEASE,
                         BGP_NOTIFY_CEASE_PEER_UNCONFIG);

  bgp_cleanup_routes ();

  if (bm->process_main_queue)
    {
      work_queue_free (bm->process_main_queue);
      bm->process_main_queue = NULL;
    }

  if (bm->process_rsclient_queue)
    {
      work_queue_free (bm->process_rsclient_queue);
      bm->process_rsclient_queue = NULL;
    }
}
| AirbornWdd/qpimd | bgpd/bgpd.c | C | gpl-2.0 | 144,930 |
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFLOG.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nfnetlink_log.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Xtables: packet logging to netlink using NFLOG");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_NFLOG");
MODULE_ALIAS("ip6t_NFLOG");
static unsigned int
nflog_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
nfulnl_log_packet(par->family, par->hooknum, skb, par->in,
par->out, &li, info->prefix);
return XT_CONTINUE;
}
/* Validate userspace-supplied target options: no unknown flags and a
 * NUL-terminated prefix string. */
static bool nflog_tg_check(const struct xt_tgchk_param *par)
{
	const struct xt_nflog_info *info = par->targinfo;

	if (info->flags & ~XT_NFLOG_MASK)
		return false;

	/* The prefix buffer must be terminated within its fixed size. */
	return info->prefix[sizeof(info->prefix) - 1] == '\0';
}
/* x_tables registration record: one NFLOG target shared by every
 * protocol family (NFPROTO_UNSPEC). */
static struct xt_target nflog_tg_reg __read_mostly = {
	.name       = "NFLOG",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.checkentry = nflog_tg_check,	/* validates rule options at load time */
	.target     = nflog_tg,		/* per-packet handler */
	.targetsize = sizeof(struct xt_nflog_info),
	.me         = THIS_MODULE,
};
/* Module entry point: register the NFLOG target with x_tables. */
static int __init nflog_tg_init(void)
{
	return xt_register_target(&nflog_tg_reg);
}
/* Module exit point: remove the NFLOG target registration. */
static void __exit nflog_tg_exit(void)
{
	xt_unregister_target(&nflog_tg_reg);
}
module_init(nflog_tg_init);
module_exit(nflog_tg_exit);
| leemgs/OptimusOneKernel-KandroidCommunity | net/netfilter/xt_NFLOG.c | C | gpl-2.0 | 1,599 |
/*
* linux/arch/arm/kernel/setup.c
*
* Copyright (C) 1995-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"
#ifndef MEM_SIZE
#define MEM_SIZE (16*1024*1024)
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Floating point emulator requested via the "fpe=" command line option. */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * Bounded copy that always NUL-terminates.  The previous
	 * memcpy(fpe_type, line, 8) read a fixed 8 bytes, over-reading
	 * the argument string when it was shorter than 8 characters and
	 * potentially leaving fpe_type unterminated.
	 */
	snprintf(fpe_type, sizeof(fpe_type), "%s", line);
	return 1;
}
__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);
unsigned int __atags_pointer __initdata;
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);
unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
struct stack {
u32 irq[3];
u32 abt[3];
u32 und[3];
} ____cacheline_aligned;
static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);
static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
* Standard memory resources
*/
static struct resource mem_res[] = {
{
.name = "Video RAM",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
},
{
.name = "Kernel text",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
},
{
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
}
};
#define video_ram mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
{
.name = "reserved",
.start = 0x3bc,
.end = 0x3be,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "reserved",
.start = 0x378,
.end = 0x37f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "reserved",
.start = 0x278,
.end = 0x27f,
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}
};
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
"undefined/unknown",
"3",
"4",
"4T",
"5",
"5T",
"5TE",
"5TEJ",
"6TEJ",
"7",
"?(11)",
"?(12)",
"?(13)",
"?(14)",
"?(15)",
"?(16)",
"?(17)",
};
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register.  Handles pre-ARM7, ARM7, old-style and new-style (CPUID
 * scheme) ID register formats.
 */
int cpu_architecture(void)
{
	/* The main ID register is stable, so read it once. */
	unsigned int id = read_cpuid_id();
	int cpu_arch;

	if ((id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((id & 0x0008f000) == 0x00007000) {
		/* ARM7 style ID: bit 23 distinguishes ARMv4T from ARMv3. */
		cpu_arch = (id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((id & 0x00080000) == 0x00000000) {
		/* Old-style post-ARM7: architecture field in bits [18:16]. */
		cpu_arch = (id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((id & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
/*
 * Determine whether the instruction cache may alias, i.e. whether a
 * cache way spans more than one page so that the same physical line
 * can be indexed by different virtual addresses.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache via CSSELR ... */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		/* ... then read its geometry from CCSIDR. */
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing is possible when one way exceeds a page. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* ARMv6 cache type register reports I-cache aliasing in bit 11. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
/*
 * Classify the CPU's data and instruction caches (VIVT, VIPT aliasing,
 * VIPT non-aliasing, ASID-tagged) and record the result in the global
 * 'cacheid', then print the classification.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			/* ARMv6 format: bit 23 flags an aliasing VIPT cache. */
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* Pre-ARMv6 cores are all VIVT. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
* These functions re-use the assembly code in head.S, which
* already provide the required functionality.
*/
extern struct proc_info_list *lookup_processor_type(unsigned int);
/*
 * printf-style output usable very early in boot: formats into a local
 * buffer, emits it via the low-level printascii() when CONFIG_DEBUG_LL
 * is enabled, and always also hands it to printk().
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];		/* messages longer than this are truncated */
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
/* Work around an ARM1136 erratum in the reported hardware capabilities. */
static void __init feat_v6_fixup(void)
{
	unsigned int id = read_cpuid_id();

	/* Only ARM Ltd. (0x41) ARMv6-family (0x07) parts are affected. */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
/*
 * Identify the boot CPU and install its per-architecture function
 * tables (processor, TLB, user-copy, cache ops), then initialize the
 * hwcap/utsname/elf_platform globals derived from the proc_info entry.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		/* No match: we cannot run on this CPU at all. */
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' depending on runtime byte order. */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
/*
* cpu_init - initialise one CPU.
*
* cpu_init sets up the per-CPU stacks.
*/
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it switches the CPU through the
 * IRQ, abort and undefined-instruction modes in turn, pointing each
 * mode's banked sp at the corresponding slot of this CPU's 'stacks'
 * entry, and finally returns to SVC mode.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 * (each msr/add/mov triple enters one exception mode with
	 * interrupts masked and loads its stack pointer)
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
/*
 * Print every machine type this kernel was built with and halt.
 * Called when the boot-provided machine ID matches none of them;
 * deliberately never returns.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
/*
 * Register a physical memory region as the next meminfo bank.
 * Returns 0 on success, -EINVAL when the bank table is full or the
 * region rounds down to nothing.
 */
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Reject empty banks (e.g. when the rounding above consumed
	 * the whole region).
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
/*
* Pick out the memory size. We look for mem=size@start,
* where start and size are "size[KkMm]"
*/
/*
 * Handler for the "mem=size[@start]" early parameter: registers the
 * given region as a memory bank, defaulting start to PHYS_OFFSET.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *end;

	/*
	 * The first explicit mem= on the command line throws away any
	 * automatically generated memory banks.
	 */
	if (!usermem) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size = memparse(p, &end);
	if (*end == '@')
		start = memparse(end + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
/*
 * Forward ATAG_RAMDISK parameters to the RAM-disk driver's module
 * globals (no-op unless CONFIG_BLK_DEV_RAM is enabled).
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	/* Only override the default size when one was actually given. */
	if (rd_sz)
		rd_size = rd_sz;
#endif
}
/*
 * Populate /proc/iomem and /proc/ioports: claim every memblock region
 * as "System RAM", nest the kernel text/data resources inside the
 * region containing them, and claim optional video RAM and legacy
 * parallel-port ranges described by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest kernel text/data under the RAM region holding them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
* Tag parsing.
*
* This is the new way of passing data to the kernel at boot time. Rather
* than passing a fixed inflexible structure to the kernel, we pass a list
* of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
* tag for the list to be recognised (to distinguish the tagged list from
* a param_struct). The list is terminated with a zero-length tag (this tag
* is not parsed in any way).
*/
/* ATAG_CORE: pick up the root device and read-only mount flag. */
static int __init parse_tag_core(const struct tag *tag)
{
	/* A minimal (header-only) core tag carries no payload. */
	if (tag->hdr.size <= 2)
		return 0;

	if ((tag->u.core.flags & 1) == 0)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(tag->u.core.rootdev);

	return 0;
}
__tagtable(ATAG_CORE, parse_tag_core);
/* ATAG_MEM: register one memory bank described by the bootloader. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}
__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console screen parameters; overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
/* ATAG_VIDEOTEXT: copy the bootloader's text-console geometry into
 * the global screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}
__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
/* ATAG_RAMDISK: decode the flag bits and forward to setup_ramdisk(). */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	unsigned int flags = tag->u.ramdisk.flags;

	/* bit 0 clear = load the ramdisk, bit 1 clear = prompt for it */
	setup_ramdisk(!(flags & 1), !(flags & 2),
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}
__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
/* ATAG_SERIAL: record the 64-bit board serial number. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}
__tagtable(ATAG_SERIAL, parse_tag_serialnr);
/* ATAG_REVISION: record the board revision number. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}
__tagtable(ATAG_REVISION, parse_tag_revision);
/*
 * ATAG_CMDLINE: merge the bootloader command line into the built-in
 * default.  Behavior depends on kernel configuration: append it,
 * ignore it, or (default) replace the built-in line entirely.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
* Scan the tag table for this tag, and call its parse function.
* The tag table is built by the linker from all the __tagtable
* declarations.
*/
/*
 * Look up the handler registered (via __tagtable) for this tag and run
 * it.  Returns non-zero when a handler was found, zero otherwise.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;
	int found = 0;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			found = 1;
			break;
		}
	}

	return found;
}
/*
* Parse all tags in the list, checking both the global and architecture
* specific tag tables.
*/
/* Walk the whole tag list (terminated by a zero-size tag), dispatching
 * each entry and warning about unknown ones. */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
* This holds our defaults.
*/
/* Fallback tag list used when the bootloader provides none: a core
 * tag, a single default memory bank, and the terminator. */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },	/* mandatory list head */
	{ 1, PAGE_SIZE, 0xff },			/* flags, pagesize, rootdev */
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },				/* default bank size; start patched later */
	{ 0, ATAG_NONE }			/* zero-size terminator */
};
/* Run the board's init_machine() hook, if any, at arch_initcall time. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
#ifdef CONFIG_KEXEC
/* Size in bytes of the low-memory range, derived from the pfn span. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long pages = max_low_pfn - min_low_pfn;

	/* Shift is deliberately done in unsigned long, as before. */
	return pages << PAGE_SHIFT;
}
/**
* reserve_crashkernel() - reserves memory are for crash kernel
*
* This function reserves memory area given in "crashkernel=" kernel command
* line parameter. The memory reserved is used by a dump capture kernel when
* primary kernel is crashing.
*/
/*
 * Parse "crashkernel=" from the command line and, when present, reserve
 * the requested physical range and publish it as the crashk_res iomem
 * resource for a future kexec'd dump-capture kernel.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Nothing requested (or unparsable): leave memory alone. */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	/* %lu, not %ld: the arguments are unsigned long. */
	printk(KERN_INFO "Reserving %luMB of memory at %luMB "
	       "for crashkernel (System RAM: %luMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
/* Neutralize every ATAG_MEM entry so parse_tags() skips them (used when
 * the machine fixup already filled in meminfo). */
static void __init squash_mem_tags(struct tag *tag)
{
	while (tag->hdr.size) {
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
		tag = tag_next(tag);
	}
}
/*
 * ATAG-based boot: find the machine descriptor matching the boot
 * machine ID, locate the tag list (boot-provided, board default, or
 * the built-in init_tags fallback), run the board fixup, parse the
 * tags, and seed boot_command_line.  Halts if the machine ID is
 * unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* Complete the fallback tag list with the runtime RAM base. */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		/* Fall back to the built-in defaults. */
		tags = (struct tag *)&init_tags;
	}

	/* Give the board code a chance to rewrite tags and cmdline. */
	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* If fixup already populated meminfo, drop the mem tags. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
/*
 * Main ARM architecture setup, called from start_kernel(): identify
 * the CPU and machine (device tree first, ATAGs as fallback), process
 * the command line, set up memory and paging, register resources, and
 * run early machine hooks.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAG boot. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	/* Board-specific early init runs last. */
	if (mdesc->init_early)
		mdesc->init_early();
}
/* Register every possible CPU with the sysfs CPU subsystem, marking
 * each as hotpluggable. */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory (entries are added elsewhere). */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
/* Feature names indexed by HWCAP bit position; NULL-terminated.
 * Printed on the "Features" line of /proc/cpuinfo by c_show(). */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
/*
 * Emit the body of /proc/cpuinfo: processor name, per-CPU BogoMIPS,
 * feature flags, decoded CPUID fields and board identification.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
/* seq_file start: this is a single-record sequence — yield one dummy
 * cursor on the first call, NULL afterwards. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos < 1)
		return (void *)1;

	return NULL;
}
/* seq_file next: advance past the single record; never a second entry. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}
/* seq_file stop: nothing to release, c_start() takes no resources. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
| GlitchKernel/Glitch | arch/arm/kernel/setup.c | C | gpl-2.0 | 25,452 |
/*
* fs/fs-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
*
* Contains all the functions related to writing back and waiting
* upon dirty inodes against superblocks, and writing back dirty
* pages against inodes. ie: data writeback. Writeout of the
* inode itself is not handled here.
*
* 10Apr2002 Andrew Morton
* Split out of fs/inode.c
* Additions for address_space-based writeback
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include <trace/events/vfs.h>
#include "internal.h"
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;			/* number of pages to write back */
	struct super_block *sb;		/* optional: restrict to this sb */
	unsigned long *older_than_this;	/* optional dirty-age cutoff */
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
/*
* Include the creation of the trace points after defining the
* wb_writeback_work structure so that the definition remains local to this
* file.
*/
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
/*
 * We don't actually have pdflush, but this one is exported though /proc...
 */
int nr_pdflush_threads;	/* NOTE(review): never written here; kept for /proc ABI */
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.  Returns non-zero while the flusher is running.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	const int running = test_bit(BDI_writeback_running, &bdi->state);

	return running;
}
/* Resolve the backing_dev_info an inode's writeback should target. */
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/* Block-device inodes carry their own per-device BDI. */
	if (!strcmp(sb->s_type->name, "bdev"))
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}
/* Map a b_dirty/b_io/b_more_io list link back to its containing inode. */
static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}
/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	struct task_struct *task = bdi->wb.task;

	/*
	 * If this bdi has no flusher thread yet, poke the default bdi's
	 * forker thread instead, which will create and run one.
	 */
	if (!task)
		task = default_backing_dev_info.wb.task;
	wake_up_process(task);
}
/*
 * Queue @work on @bdi's work list under wb_lock and kick the flusher
 * (or the forker thread that will create it).
 */
static void bdi_queue_work(struct backing_dev_info *bdi,
		struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);
	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	/* bdi_wakeup_flusher() requires wb_lock to be held */
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}
/*
 * Allocate and queue a WB_SYNC_NONE writeback work item for @bdi.
 */
static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	if (!w) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	w->sync_mode	= WB_SYNC_NONE;
	w->nr_pages	= nr_pages;
	w->range_cyclic	= range_cyclic;
	w->reason	= reason;

	bdi_queue_work(bdi, w);
}
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	/* range_cyclic is forced on for opportunistic writeback */
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}
/*
* Remove the inode from the writeback list it is on.
*/
void inode_wb_list_del(struct inode *inode)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
spin_lock(&bdi->wb.list_lock);
list_del_init(&inode->i_wb_list);
spin_unlock(&bdi->wb.list_lock);
}
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		/* b_dirty is kept newest-first; .next is the newest entry */
		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}
/* Wake anyone waiting on __I_SYNC after writeback of @inode finished. */
static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}
/* Was @inode dirtied after time @t?  Handles jiffies wraparound on 32-bit. */
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 *
 * Inodes newer than work->older_than_this (if set) are left behind.  When
 * inodes from more than one superblock are involved, the moved inodes are
 * regrouped so that each superblock's inodes end up adjacent on
 * @dispatch_queue.  Returns the number of inodes moved.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	/* delaying_queue is newest-first, so scan from the tail (oldest) */
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}
/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	/* previously deferred inodes go ahead of newly expired ones */
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
return inode->i_sb->s_op->write_inode(inode, wbc);
return 0;
}
/*
 * Wait for writeback on an inode to complete.
 *
 * Called with wb->list_lock and inode->i_lock held; both are dropped while
 * sleeping and re-taken (in that order) before re-checking I_SYNC, so the
 * caller's view of the lists may have changed across this call.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}
/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Returns 0 on success, otherwise the first error from do_writepages(),
 * filemap_fdatawait() or write_inode().  Both locks are dropped while I/O is
 * in flight and re-taken before requeueing the inode.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	/* Sanity-check the caller's reference/freeing contract. */
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			trace_writeback_single_inode_requeue(inode, wbc,
							     nr_to_write);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on it's way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
/* Size one writeback slice for a single inode, in pages. */
static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		return LONG_MAX;

	/* Bound the slice by bandwidth, dirty scope and remaining budget. */
	pages = min(bdi->avg_write_bandwidth / 2,
		    global_dirty_limit / DIRTY_SCOPE);
	pages = min(pages, work->nr_pages);
	return round_down(pages + MIN_WRITEBACK_PAGES, MIN_WRITEBACK_PAGES);
}
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes beeing freed, first
		 * kind does not need peridic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		/* a fully-cleaned inode counts as one unit of progress */
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		if (wbc.pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
/*
 * Walk b_io across superblocks, pinning each sb passively and delegating to
 * writeback_sb_inodes().  Returns pages/inodes written (see that function).
 */
static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}
/*
 * Write back up to @nr_pages pages from this wb's b_io list, refilling it
 * from b_dirty when empty.  WB_SYNC_NONE only.  Returns the number of pages
 * (and clean inodes, each counted as one page) written.
 */
long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
			 enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	/* work.nr_pages counts down as pages get written */
	return nr_pages - work.nr_pages;
}
/* Is either the global or this bdi's dirty state over the background limit? */
static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;
	unsigned long global_dirty;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	global_dirty = global_page_state(NR_FILE_DIRTY) +
		       global_page_state(NR_UNSTABLE_NFS);
	if (global_dirty > background_thresh)
		return true;

	return bdi_stat(bdi, BDI_RECLAIMABLE) >
	       bdi_dirty_limit(bdi, background_thresh);
}
/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 *
 * Returns the number of pages written.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/* kupdate refreshes the expiry cutoff on every pass */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
			work->older_than_this = &oldest_jif;
		}

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		/* FIFO: take the oldest pending item off the list */
		work = list_first_entry(&bdi->work_list,
					struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}
/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}
/* Run background writeback if dirty state exceeds the background limit. */
static long wb_check_background_flush(struct bdi_writeback *wb)
{
	struct wb_writeback_work work = {
		.nr_pages	= LONG_MAX,
		.sync_mode	= WB_SYNC_NONE,
		.for_background	= 1,
		.range_cyclic	= 1,
		.reason		= WB_REASON_BACKGROUND,
	};

	if (!over_bground_thresh(wb->bdi))
		return 0;

	return wb_writeback(wb, &work);
}
/* kupdate-style periodic flush of old dirty data, if it is due. */
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long next_flush;
	long nr_pages;

	/* A zero interval disables periodic writeback entirely. */
	if (!dirty_writeback_interval)
		return 0;

	next_flush = wb->last_old_flush +
		     msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, next_flush))
		return 0;	/* not due yet */

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();
	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}
	return 0;
}
/*
 * Retrieve work items and do the writeback they describe.
 *
 * Drains bdi->work_list, then runs periodic and background flushes.
 * Returns the total number of pages written.
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}
/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the preriodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		/* Set state before re-checking for work to avoid lost wakeups. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
			   global_page_state(NR_UNSTABLE_NFS);

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		/* Only bother bdis that actually have dirty data. */
		if (bdi_has_dirty_io(bdi))
			__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
/*
 * Debug helper for the block_dump sysctl: log which task dirtied which
 * inode.  d_lock is held across the printk so dentry->d_name stays stable
 * while it is being printed.
 */
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	trace_dirty_inode(inode, current);

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	/* re-check under i_lock: another CPU may have set the flags already */
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	/* drop the final deferred reference outside the list lock */
	iput(old_inode);
}
/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	/* on-stack completion: we block until the flusher consumes the work */
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	/*
	 * Plain call rather than `return <void expression>;`: returning a
	 * void expression from a void function is a C90/C99 constraint
	 * violation (GNU extension) and warns under -pedantic builds.
	 */
	writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 0;

	down_read(&sb->s_umount);
	writeback_inodes_sb(sb, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
/**
 * writeback_inodes_sb_nr_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr,
				   enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	/* on-stack completion: we block until the flusher finishes the work */
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	/* also wait on writeback started before this call (data integrity) */
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 *
 * Returns the result of writeback_single_inode().
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	/*
	 * Backing device can't do dirty-data writeback (e.g. ramfs-like):
	 * only the inode itself is written, no data pages.
	 */
	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	/* Lock order: wb->list_lock first, then inode->i_lock. */
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	if (sync)
		inode_sync_wait(inode);	/* wait for in-flight inode writeback */
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 *
 * Returns the result of writeback_single_inode().
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	/* Lock order: wb->list_lock first, then inode->i_lock. */
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.nr_to_write = 0, /* metadata-only */
	};

	/* Synchronous callers want WB_SYNC_ALL integrity semantics. */
	if (wait)
		wbc.sync_mode = WB_SYNC_ALL;
	else
		wbc.sync_mode = WB_SYNC_NONE;

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
| xplodwild/packaged-linux-linaro-3.2-ci | fs/fs-writeback.c | C | gpl-2.0 | 38,100 |
/*
Linux PINMUX.C
*/
#include <asm/arch/am_regs.h>
#include <asm/arch/am_eth_reg.h>
#include <asm/arch/pinmux.h>
/*
 * Clear the given bits in pin-mux register <mux_index>.
 *
 * mux_index: index of the PERIPHS_PIN_MUX_n register (0..12)
 * mux_mask:  bits to clear in that register
 *
 * Returns 0 on success, -1 if mux_index is out of range.
 */
int clear_mio_mux(unsigned mux_index, unsigned mux_mask)
{
	/* static const: the table is built once, not on every call */
	static const unsigned mux_reg[] = {
		PERIPHS_PIN_MUX_0, PERIPHS_PIN_MUX_1, PERIPHS_PIN_MUX_2,
		PERIPHS_PIN_MUX_3, PERIPHS_PIN_MUX_4, PERIPHS_PIN_MUX_5,
		PERIPHS_PIN_MUX_6, PERIPHS_PIN_MUX_7, PERIPHS_PIN_MUX_8,
		PERIPHS_PIN_MUX_9, PERIPHS_PIN_MUX_10, PERIPHS_PIN_MUX_11,
		PERIPHS_PIN_MUX_12
	};

	/* Bound derived from the table itself instead of a magic 13. */
	if (mux_index < sizeof(mux_reg) / sizeof(mux_reg[0])) {
		CLEAR_CBUS_REG_MASK(mux_reg[mux_index], mux_mask);
		return 0;
	}
	return -1;
}
/*
 * Set the given bits in pin-mux register <mux_index>.
 *
 * mux_index: index of the PERIPHS_PIN_MUX_n register (0..12)
 * mux_mask:  bits to set in that register
 *
 * Returns 0 on success, -1 if mux_index is out of range.
 */
int set_mio_mux(unsigned mux_index, unsigned mux_mask)
{
	/* static const: the table is built once, not on every call */
	static const unsigned mux_reg[] = {
		PERIPHS_PIN_MUX_0, PERIPHS_PIN_MUX_1, PERIPHS_PIN_MUX_2,
		PERIPHS_PIN_MUX_3, PERIPHS_PIN_MUX_4, PERIPHS_PIN_MUX_5,
		PERIPHS_PIN_MUX_6, PERIPHS_PIN_MUX_7, PERIPHS_PIN_MUX_8,
		PERIPHS_PIN_MUX_9, PERIPHS_PIN_MUX_10, PERIPHS_PIN_MUX_11,
		PERIPHS_PIN_MUX_12
	};

	/* Bound derived from the table itself instead of a magic 13. */
	if (mux_index < sizeof(mux_reg) / sizeof(mux_reg[0])) {
		SET_CBUS_REG_MASK(mux_reg[mux_index], mux_mask);
		return 0;
	}
	return -1;
}
/*
   call it before pinmux init;
   call it before soft reset;
*/
void clearall_pinmux(void)
{
	int reg;

	/* Wipe every bit of all 13 pin-mux registers. */
	for (reg = 0; reg < 13; reg++)
		clear_mio_mux(reg, 0xffffffff);
}
/*ETH PINMUX SETTING
More details can get from am_eth_pinmux.h
*/
int eth_set_pinmux(int bank_id,int clk_in_out_id,unsigned long ext_msk)
{
int ret=0;
switch(bank_id)
{
case ETH_BANK0_GPIOX46_X54:
if(ext_msk>0)
set_mio_mux(ETH_BANK0_REG1,ext_msk);
else
set_mio_mux(ETH_BANK0_REG1,ETH_BANK0_REG1_VAL);
break;
case ETH_BANK1_GPIOX59_X67:
if(ext_msk>0)
set_mio_mux(ETH_BANK1_REG1,ext_msk);
else
set_mio_mux(ETH_BANK1_REG1,ETH_BANK1_REG1_VAL);
break;
default:
printf("UNknow pinmux setting of ethernet!error bankid=%d,must be 0-2\n",bank_id);
ret=-1;
}
switch(clk_in_out_id)
{
case ETH_CLK_IN_GPIOX45_REG3_11:
set_mio_mux(3,1<<11);
break;
case ETH_CLK_IN_GPIOX55_REG3_0:
set_mio_mux(3,1);
break;
case ETH_CLK_IN_GPIOX58_REG3_24:
set_mio_mux(3,1<<24);
break;
case ETH_CLK_IN_GPIOX68_REG3_13:
set_mio_mux(3,1<<13);
break;
case ETH_CLK_OUT_GPIOX45_REG3_12:
set_mio_mux(3,1<<12);
break;
case ETH_CLK_OUT_GPIOX55_REG3_1:
set_mio_mux(3,1<<1);
break;
case ETH_CLK_OUT_GPIOX58_REG3_25:
set_mio_mux(3,1<<25);
break;
case ETH_CLK_OUT_GPIOX68_REG3_14:
set_mio_mux(3,1<<14);
break;
default:
printf("UNknow clk_in_out_id setting of ethernet!error clk_in_out_id=%d,must be 0-7\n",clk_in_out_id);
ret=-1;
}
return ret;
}
| bogdanov-d-a/Amlogic-reff16-uboot | arch/arm/cpu/aml_meson/m2/pinmux.c | C | gpl-2.0 | 2,559 |
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Alan Korr
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "hwcompat.h"
#include "kernel.h"
#include "lcd.h"
#include "system.h"
/*** definitions ***/
#define LCD_SET_LOWER_COLUMN_ADDRESS ((char)0x00)
#define LCD_SET_HIGHER_COLUMN_ADDRESS ((char)0x10)
#define LCD_SET_INTERNAL_REGULATOR_RESISTOR_RATIO ((char)0x20)
#define LCD_SET_POWER_CONTROL_REGISTER ((char)0x28)
#define LCD_SET_DISPLAY_START_LINE ((char)0x40)
#define LCD_SET_CONTRAST_CONTROL_REGISTER ((char)0x81)
#define LCD_SET_SEGMENT_REMAP ((char)0xA0)
#define LCD_SET_LCD_BIAS ((char)0xA2)
#define LCD_SET_ENTIRE_DISPLAY_OFF ((char)0xA4)
#define LCD_SET_ENTIRE_DISPLAY_ON ((char)0xA5)
#define LCD_SET_NORMAL_DISPLAY ((char)0xA6)
#define LCD_SET_REVERSE_DISPLAY ((char)0xA7)
#define LCD_SET_MULTIPLEX_RATIO ((char)0xA8)
#define LCD_SET_BIAS_TC_OSC ((char)0xA9)
#define LCD_SET_1OVER4_BIAS_RATIO ((char)0xAA)
#define LCD_SET_INDICATOR_OFF ((char)0xAC)
#define LCD_SET_INDICATOR_ON ((char)0xAD)
#define LCD_SET_DISPLAY_OFF ((char)0xAE)
#define LCD_SET_DISPLAY_ON ((char)0xAF)
#define LCD_SET_PAGE_ADDRESS ((char)0xB0)
#define LCD_SET_COM_OUTPUT_SCAN_DIRECTION ((char)0xC0)
#define LCD_SET_TOTAL_FRAME_PHASES ((char)0xD2)
#define LCD_SET_DISPLAY_OFFSET ((char)0xD3)
#define LCD_SET_READ_MODIFY_WRITE_MODE ((char)0xE0)
#define LCD_SOFTWARE_RESET ((char)0xE2)
#define LCD_NOP ((char)0xE3)
#define LCD_SET_END_OF_READ_MODIFY_WRITE_MODE ((char)0xEE)
/* LCD command codes */
#define LCD_CNTL_RESET 0xe2 /* Software reset */
#define LCD_CNTL_POWER 0x2f /* Power control */
#define LCD_CNTL_CONTRAST 0x81 /* Contrast */
#define LCD_CNTL_OUTSCAN 0xc8 /* Output scan direction */
#define LCD_CNTL_SEGREMAP 0xa1 /* Segment remap */
#define LCD_CNTL_DISPON 0xaf /* Display on */
#define LCD_CNTL_PAGE 0xb0 /* Page address */
#define LCD_CNTL_HIGHCOL 0x10 /* Upper column address */
#define LCD_CNTL_LOWCOL 0x00 /* Lower column address */
/** globals **/
static int xoffset; /* needed for flip */
/*** hardware configuration ***/
/* Pick the factory default contrast for the fitted panel variant. */
int lcd_default_contrast(void)
{
    if (HW_MASK & LCD_CONTRAST_BIAS)
        return 31;
    return 49;
}
/* Program the contrast level: the contrast command byte is followed
 * immediately by the level byte. */
void lcd_set_contrast(int val)
{
    lcd_write_command(LCD_CNTL_CONTRAST);
    lcd_write_command(val);
}
/* Switch the panel between normal and reverse (inverted) video. */
void lcd_set_invert_display(bool yesno)
{
    lcd_write_command(yesno ? LCD_SET_REVERSE_DISPLAY
                            : LCD_SET_NORMAL_DISPLAY);
}
/* turn the display upside down (call lcd_update() afterwards) */
void lcd_set_flip(bool yesno)
{
#ifdef HAVE_DISPLAY_FLIPPED
    if (!yesno)  /* panel mounted upside down: invert the request */
#else
    if (yesno)
#endif
    {
        lcd_write_command(LCD_SET_SEGMENT_REMAP);
        lcd_write_command(LCD_SET_COM_OUTPUT_SCAN_DIRECTION);
        xoffset = 132 - LCD_WIDTH; /* 132 columns minus the 112 we have */
    }
    else
    {
        /* mirrored segment order and COM scan direction */
        lcd_write_command(LCD_SET_SEGMENT_REMAP | 0x01);
        lcd_write_command(LCD_SET_COM_OUTPUT_SCAN_DIRECTION | 0x08);
        xoffset = 0;
    }
}
/* One-time LCD controller initialization. The register write sequence
 * mirrors the original firmware — do not reorder the commands. */
void lcd_init_device(void)
{
    /* Initialize PB0-3 as output pins */
    PBCR2 &= 0xff00; /* MD = 00 */
    PBIOR |= 0x000f; /* IOR = 1 */

    /* inits like the original firmware */
    lcd_write_command(LCD_SOFTWARE_RESET);
    lcd_write_command(LCD_SET_INTERNAL_REGULATOR_RESISTOR_RATIO + 4);
    lcd_write_command(LCD_SET_1OVER4_BIAS_RATIO + 0); /* force 1/4 bias: 0 */
    lcd_write_command(LCD_SET_POWER_CONTROL_REGISTER + 7);
               /* power control register: op-amp=1, regulator=1, booster=1 */
    lcd_write_command(LCD_SET_DISPLAY_ON);
    lcd_write_command(LCD_SET_NORMAL_DISPLAY);
    lcd_set_flip(false);
    lcd_write_command(LCD_SET_DISPLAY_START_LINE + 0);
    lcd_set_contrast(lcd_default_contrast());
    lcd_write_command(LCD_SET_PAGE_ADDRESS);
    lcd_write_command(LCD_SET_LOWER_COLUMN_ADDRESS + 0);
    lcd_write_command(LCD_SET_HIGHER_COLUMN_ADDRESS + 0);

    /* Start from a blank frame and push it to the panel. */
    lcd_clear_display();
    lcd_update();
}
/*** Update functions ***/
/* Performance function that works with an external buffer
   note that by and bheight are in 8-pixel units! */
void lcd_blit_mono(const unsigned char *data, int x, int by, int width,
                   int bheight, int stride)
{
    int col = x + xoffset;   /* physical start column, hoisted out of the loop */

    /* Push each 8-pixel-high block row straight to the controller. */
    for (; bheight > 0; bheight--, by++, data += stride)
    {
        lcd_write_command(LCD_CNTL_PAGE    | (by & 0xf));
        lcd_write_command(LCD_CNTL_HIGHCOL | ((col >> 4) & 0xf));
        lcd_write_command(LCD_CNTL_LOWCOL  | (col & 0xf));
        lcd_write_data(data, width);
    }
}
/* Helper function for lcd_grey_phase_blit(). */
void lcd_grey_data(unsigned char *values, unsigned char *phases, int count);
/* Performance function that works with an external buffer
   note that by and bheight are in 8-pixel units! */
void lcd_blit_grey_phase(unsigned char *values, unsigned char *phases,
                         int x, int by, int width, int bheight, int stride)
{
    int col = x + xoffset;   /* physical start column, hoisted out of the loop */
    int row;

    stride <<= 3; /* 8 pixels per block */

    /* Feed one greyscale block row at a time through lcd_grey_data(). */
    for (row = 0; row < bheight; row++)
    {
        lcd_write_command(LCD_CNTL_PAGE    | ((by + row) & 0xf));
        lcd_write_command(LCD_CNTL_HIGHCOL | ((col >> 4) & 0xf));
        lcd_write_command(LCD_CNTL_LOWCOL  | (col & 0xf));
        lcd_grey_data(values, phases, width);
        values += stride;
        phases += stride;
    }
}
/* Update the display.
   This must be called after all other LCD functions that change the display. */
void lcd_update(void)
{
    int y;

    /* Copy display bitmap to hardware, one 8-pixel page row at a time */
    for (y = 0; y < LCD_FBHEIGHT; y++)
    {
        lcd_write_command (LCD_CNTL_PAGE | (y & 0xf));
        lcd_write_command (LCD_CNTL_HIGHCOL | ((xoffset >> 4) & 0xf));
        lcd_write_command (LCD_CNTL_LOWCOL | (xoffset & 0xf));
        lcd_write_data (FBADDR(0, y), LCD_WIDTH);
    }
}
/* Update a fraction of the display.
   x/y/width/height are in pixels; y is rounded down and the bottom edge
   rounded up to whole 8-pixel page rows.
   NOTE(review): only the right edge (x+width) and bottom (ymax) are
   clamped; callers presumably never pass negative x/y — confirm. */
void lcd_update_rect(int x, int y, int width, int height)
{
    int ymax;

    /* The Y coordinates have to work on even 8 pixel rows */
    ymax = (y + height-1) >> 3;
    y >>= 3;

    if(x + width > LCD_WIDTH)
        width = LCD_WIDTH - x;
    if (width <= 0)
        return; /* nothing left to do, 0 is harmful to lcd_write_data() */
    if(ymax >= LCD_FBHEIGHT)
        ymax = LCD_FBHEIGHT-1;

    /* Copy specified rectange bitmap to hardware */
    for (; y <= ymax; y++)
    {
        lcd_write_command (LCD_CNTL_PAGE | (y & 0xf));
        lcd_write_command (LCD_CNTL_HIGHCOL | (((x+xoffset) >> 4) & 0xf));
        lcd_write_command (LCD_CNTL_LOWCOL | ((x+xoffset) & 0xf));
        lcd_write_data (FBADDR(x,y), width);
    }
}
| renolui/RenoStudio | Player/firmware/target/sh/archos/lcd-archos-bitmap.c | C | gpl-2.0 | 7,922 |
/*
* Customer code to add GPIO control during WLAN start/stop
* Copyright (C) 1999-2010, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_custom_gpio.c,v 1.1.4.8.4.4 2011/01/20 20:23:09 Exp $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <wlioctl.h>
#include <wl_iw.h>
#define WL_ERROR(x) printf x
#define WL_TRACE(x)
#ifdef CUSTOMER_HW_SAMSUNG
extern void bcm_wlan_power_off(int);
extern void bcm_wlan_power_on(int);
extern void wlan_setup_power(int, int);
#endif /* CUSTOMER_HW_SAMSUNG */
#ifdef CUSTOMER_HW
extern void bcm_wlan_power_off(int);
extern void bcm_wlan_power_on(int);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
int wifi_set_carddetect(int on);
int wifi_set_power(int on, unsigned long msec);
int wifi_get_irq_number(unsigned long *irq_flags_ptr);
int wifi_get_mac_addr(unsigned char *buf);
void *wifi_get_country_code(char *ccode);
#endif
#if defined(OOB_INTR_ONLY)
#if defined(BCMLXSDMMC)
extern int sdioh_mmc_irq(int irq);
#endif /* (BCMLXSDMMC) */
#ifdef CUSTOMER_HW3
#include <mach/gpio.h>
#endif
/* Customer specific Host GPIO defintion */
/* Customer specific Host GPIO defintion */
#ifdef CUSTOMER_HW_SAMSUNG
static int dhd_oob_gpio_num = IRQ_EINT(20);
#else
static int dhd_oob_gpio_num = -1; /* GG 19 */
#endif
module_param(dhd_oob_gpio_num, int, 0644);
MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
/*
 * Map the customer-specific WLAN out-of-band interrupt GPIO to a host
 * IRQ number.
 *
 * irq_flags_ptr: only consumed on the CUSTOMER_HW2 path, where the
 *                platform code fills in the IRQ trigger flags.
 *
 * Returns the host IRQ number, or a negative value when no OOB GPIO is
 * configured for this platform.
 */
int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
{
	int host_oob_irq = 0;

#ifdef CUSTOMER_HW2
	host_oob_irq = wifi_get_irq_number(irq_flags_ptr);

#else /* for NOT CUSTOMER_HW2 */
#if defined(CUSTOM_OOB_GPIO_NUM)
	/* Fall back to the build-time GPIO if no module parameter was given. */
	if (dhd_oob_gpio_num < 0) {
		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
	}
#endif

	if (dhd_oob_gpio_num < 0) {
		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
			__FUNCTION__));
		return (dhd_oob_gpio_num);
	}

	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
		__FUNCTION__, dhd_oob_gpio_num));

#if defined CUSTOMER_HW
	host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
#elif defined CUSTOMER_HW3
	gpio_request(dhd_oob_gpio_num, "oob irq");
	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
	gpio_direction_input(dhd_oob_gpio_num);
#elif defined CUSTOMER_HW_SAMSUNG
	/* On this platform the configured value already is an IRQ number
	 * (initialized to IRQ_EINT(20) above). */
	host_oob_irq = dhd_oob_gpio_num;
#endif /* CUSTOMER_HW */
#endif /* CUSTOMER_HW2 */

	return (host_oob_irq);
}
#endif /* defined(OOB_INTR_ONLY) */
/* Customer function to control hw specific wlan gpios */
/*
 * onoff selects one of four transitions: WLAN_RESET_ON/OFF toggle the
 * chip reset line, WLAN_POWER_ON/OFF toggle the WL_REG_ON regulator.
 * Exactly one platform branch (CUSTOMER_HW_SAMSUNG / CUSTOMER_HW /
 * CUSTOMER_HW2) is expected to be compiled in; an unknown onoff value
 * is silently ignored.
 */
void
dhd_customer_gpio_wlan_ctrl(int onoff)
{
	switch (onoff) {
	case WLAN_RESET_OFF:
		WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW_SAMSUNG
		//bcm_wlan_power_off(2);
		wlan_setup_power(0, 2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW
		bcm_wlan_power_off(2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
		wifi_set_power(0, 0);
#endif
		WL_ERROR(("=========== WLAN placed in RESET ========\n"));
		break;

	case WLAN_RESET_ON:
		WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW_SAMSUNG
		//bcm_wlan_power_on(2);
		wlan_setup_power(1, 2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW
		bcm_wlan_power_on(2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
		wifi_set_power(1, 0);
#endif
		WL_ERROR(("=========== WLAN going back to live ========\n"));
		break;

	case WLAN_POWER_OFF:
		WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW_SAMSUNG
		//bcm_wlan_power_off(1);
		wlan_setup_power(0, 1);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW
		bcm_wlan_power_off(1);
#endif /* CUSTOMER_HW */
		break;

	case WLAN_POWER_ON:
		WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
			__FUNCTION__));
#ifdef CUSTOMER_HW_SAMSUNG
		//bcm_wlan_power_on(1);
		wlan_setup_power(1, 1);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW
		bcm_wlan_power_on(1);
		/* Lets customer power to get stable */
		OSL_DELAY(50);
#endif /* CUSTOMER_HW */
		break;
	}
}
#ifdef GET_CUSTOM_MAC_ENABLE
/* Function to get custom MAC address */
/*
 * buf: caller-provided buffer receiving the ethernet address.
 * Returns 0 on success, -EINVAL if buf is NULL; on the CUSTOMER_HW2
 * path the platform's wifi_get_mac_addr() result is returned as-is.
 */
int
dhd_custom_get_mac_address(unsigned char *buf)
{
	int ret = 0;

	WL_TRACE(("%s Enter\n", __FUNCTION__));
	if (!buf)
		return -EINVAL;

	/* Customer access to MAC address stored outside of DHD driver */
#ifdef CUSTOMER_HW2
	ret = wifi_get_mac_addr(buf);
#endif

#ifdef EXAMPLE_GET_MAC
	/* EXAMPLE code */
	{
		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
	}
#endif /* EXAMPLE_GET_MAC */

	return ret;
}
#endif /* GET_CUSTOM_MAC_ENABLE */
/* Customized Locale table : OPTIONAL feature
 * Maps ISO 3166-1 country codes to platform regulatory locales.
 * Without EXAMPLE_TABLE the array is empty and the fallback lookup in
 * get_customized_country_code() has nothing to match. */
const struct cntry_locales_custom translate_custom_table[] = {
/* Table should be filled out based on custom platform regulatory requirement */
#ifdef EXAMPLE_TABLE
	{"",   "XY", 4},  /* universal */
	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
	{"EU", "EU", 5},  /* European union countries */
	{"AT", "EU", 5},
	{"BE", "EU", 5},
	{"BG", "EU", 5},
	{"CY", "EU", 5},
	{"CZ", "EU", 5},
	{"DK", "EU", 5},
	{"EE", "EU", 5},
	{"FI", "EU", 5},
	{"FR", "EU", 5},
	{"DE", "EU", 5},
	{"GR", "EU", 5},
	{"HU", "EU", 5},
	{"IE", "EU", 5},
	{"IT", "EU", 5},
	{"LV", "EU", 5},
	{"LI", "EU", 5},
	{"LT", "EU", 5},
	{"LU", "EU", 5},
	{"MT", "EU", 5},
	{"NL", "EU", 5},
	{"PL", "EU", 5},
	{"PT", "EU", 5},
	{"RO", "EU", 5},
	{"SK", "EU", 5},
	{"SI", "EU", 5},
	{"ES", "EU", 5},
	{"SE", "EU", 5},
	{"GB", "EU", 5},  /* input ISO "GB" to : EU regrev 05 */
	{"IL", "IL", 0},
	{"CH", "CH", 0},
	{"TR", "TR", 0},
	{"NO", "NO", 0},
	{"KR", "XY", 3},
	{"AU", "XY", 3},
	{"CN", "XY", 3},  /* input ISO "CN" to : XY regrev 03 */
	{"TW", "XY", 3},
	{"AR", "XY", 3},
	{"MX", "XY", 3}
#endif /* EXAMPLE_TABLE */
};
/* Customized Locale convertor
 * input : ISO 3166-1 country abbreviation
 * output: customized cspec (locale string + regulatory revision)
 *
 * Non-CUSTOMER_HW2 path falls back to the first table entry (the
 * universal locale) when the ISO code is not found.
 */
void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
{
#ifdef CUSTOMER_HW2
	struct cntry_locales_custom *cloc_ptr;

	if (!cspec)
		return;

	cloc_ptr = wifi_get_country_code(country_iso_code);
	if (cloc_ptr) {
		/* NOTE(review): this path uses strlcpy while the fallback path
		 * below uses memcpy — presumably both fit WLC_CNTRY_BUF_SZ;
		 * confirm the buffers match. */
		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
		cspec->rev = cloc_ptr->custom_locale_rev;
	}
	return;
#else
	int size, i;

	size = ARRAYSIZE(translate_custom_table);

	if (cspec == 0)
		return;

	if (size == 0)
		return;

	for (i = 0; i < size; i++) {
		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
			memcpy(cspec->ccode, translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
			cspec->rev = translate_custom_table[i].custom_locale_rev;
			return;
		}
	}
	/* Not found: fall back to the first (universal) entry. */
	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
	cspec->rev = translate_custom_table[0].custom_locale_rev;
	return;
#endif
}
| sdadier/gb_kernel_2.6.32.9-mtd | Kernel/drivers/net/wireless/bcm4329/dhd_custom_gpio.c | C | gpl-2.0 | 8,018 |
/** ============================================================================
* @file zsp800m_map.c
*
* @path $(APUDRV)/gpp/src/arch/ZSP800M/
*
* @desc Defines the configuration mapping information for the APU DRIVER
* driver.
*
* @ver 0.01.00.00
* ============================================================================
* Copyright (C) 2011-2012, Nufront Incorporated - http://www.nufront.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
* whether express or implied; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* ============================================================================
*/
/* ----------------------------------- APU DRIVER Headers */
#include <apudrv.h>
#include <_apudrv.h>
#if defined (POOL_COMPONENT)
#include <pooldefs.h>
#endif /* if defined (POOL_COMPONENT) */
#if defined (MSGQ_COMPONENT)
#include <msgqdefs.h>
#endif /* if defined (MSGQ_COMPONENT) */
#include <cfgmap.h>
#if defined (__cplusplus)
EXTERN "C" {
#endif /* defined (__cplusplus) */
/** ============================================================================
* @name ZSP800MMAP_DspObjects
*
* @desc Array of configuration mapping objects for the DSPs in the system.
* ============================================================================
*/
EXTERN DSP_Interface ZSP800M_Interface ;

/* One entry per supported DSP type; entries are looked up by name. */
CONST CFGMAP_Dsp ZSP800MMAP_DspObjects [] =
{
    {
        "ZSP800M",          /* NAME      : Name of the DSP */
        &ZSP800M_Interface  /* INTERFACE : DSP interface table */
    }
} ;
/** ============================================================================
* @name ZSP800MMAP_LoaderObjects
*
* @desc Array of configuration mapping objects for the DSP executable
* loaders in the system.
* ============================================================================
*/
EXTERN KFILE_Interface KFILEPSEUDO_Interface ;
EXTERN KFILE_Interface KFILEDEF_Interface ;

/* Executable-loader table for the "BIN" format. */
CONST CFGMAP_Loader ZSP800MMAP_LoaderObjects [] =
{
    {
        "BIN",
        /* NOTE(review): the loader interface is stubbed out (was
         * &BINFILE_Interface) — confirm this is intentional. */
        NULL,//&BINFILE_Interface,
        &KFILEDEF_Interface
    }
} ;
#if (!defined (ONLY_PROC_COMPONENT))
/** ============================================================================
* @name ZSP800MMAP_LinkDrvObjects
*
* @desc Array of configuration mapping objects for the link drivers in the
* system.
* ============================================================================
*/
EXTERN DRV_Interface SHMDRV_Interface ;

/* Shared-memory link driver is the only link driver configured. */
CONST CFGMAP_LinkDrv ZSP800MMAP_LinkDrvObjects [] =
{
    {
        "SHMDRV",          /* NAME      : Name of the link driver */
        &SHMDRV_Interface  /* INTERFACE : Link driver interface table */
    }
} ;
/** ============================================================================
* @name ZSP800MMAP_IpsObjects
*
* @desc Array of configuration mapping objects for the IPS components in the
* system.
* ============================================================================
*/
EXTERN FnIpsInit IPS_init ;
EXTERN FnIpsExit IPS_exit ;
#if defined (DDSP_DEBUG)
EXTERN FnIpsDebug IPS_debug ;
#endif /* if defined (DDSP_DEBUG) */

/* Inter-processor signaling component; the debug hook is compiled in
 * only for DDSP_DEBUG builds. */
CONST CFGMAP_Ips ZSP800MMAP_IpsObjects [] =
{
    {
        "IPS",                  /* NAME      : Name of the IPS */
        (FnIpsInit) &IPS_init,  /* FXN_INIT  : Init function for the IPS */
        (FnIpsExit) &IPS_exit,  /* FXN_EXIT  : Exit function for the IPS */
#if defined (DDSP_DEBUG)
        (FnIpsDebug) &IPS_debug /* FXN_DEBUG : Debug function for the IPS */
#endif /* if defined (DDSP_DEBUG) */
    }
} ;
#endif /* if (!defined (ONLY_PROC_COMPONENT)) */
#if defined (POOL_COMPONENT)
/** ============================================================================
* @name ZSP800MMAP_PoolObjects
*
* @desc Array of configuration mapping objects for the POOLs in the system.
* ============================================================================
*/
EXTERN FnPoolInit SMAPOOL_init ;
EXTERN FnPoolExit SMAPOOL_exit ;
EXTERN POOL_Interface SMAPOOL_Interface ;

#if defined (PCPY_LINK)
EXTERN FnPoolInit BUFPOOL_init ;
EXTERN FnPoolExit BUFPOOL_exit ;
EXTERN POOL_Interface BUFPOOL_Interface ;
#endif /* if defined (PCPY_LINK) */

/* Pool table: SMAPOOL always; BUFPOOL only for PCPY_LINK builds. */
CONST CFGMAP_Pool ZSP800MMAP_PoolObjects [] =
{
    {
        "SMAPOOL",                   /* NAME     : Name of the pool */
        (FnPoolInit) &SMAPOOL_init,  /* FXN_INIT : Init function for the pool */
        (FnPoolExit) &SMAPOOL_exit,  /* FXN_EXIT : Exit function for the pool */
        &SMAPOOL_Interface           /* INTERFACE : Pool interface table */
    },
#if defined (PCPY_LINK)
    {
        "BUFPOOL",                   /* NAME     : Name of the pool */
        (FnPoolInit) &BUFPOOL_init,  /* FXN_INIT : Init function for the pool */
        (FnPoolExit) &BUFPOOL_exit,  /* FXN_EXIT : Exit function for the pool */
        &BUFPOOL_Interface           /* INTERFACE : Pool interface table */
    }
#endif /* if defined (PCPY_LINK) */
} ;
#endif /* if defined (POOL_COMPONENT) */
#if defined (CHNL_COMPONENT)
/** ============================================================================
* @name ZSP800MMAP_DataDrvObjects
*
* @desc Array of configuration mapping objects for the Data drivers in the
* system.
* ============================================================================
*/
EXTERN DATA_Interface ZCPYDATA_Interface ;

/* Zero-copy data transfer driver is the only data driver configured. */
CONST CFGMAP_DataDrv ZSP800MMAP_DataDrvObjects [] =
{
    {
        "ZCPYDATA",          /* NAME      : Name of the data driver */
        &ZCPYDATA_Interface  /* INTERFACE : Data transfer interface table */
    }
} ;
#endif /* if defined (CHNL_COMPONENT) */
#if defined (MSGQ_COMPONENT)
/** ============================================================================
* @name ZSP800MMAP_MqtObjects
*
* @desc Array of configuration mapping objects for the Message Queue
* Transports in the system.
* ============================================================================
*/
EXTERN MQT_Interface ZCPYMQT_Interface ;

/* Zero-copy message queue transport is the only MQT configured. */
CONST CFGMAP_Mqt ZSP800MMAP_MqtObjects [] =
{
    {
        "ZCPYMQT",          /* NAME      : Name of the Message Queue Transport */
        &ZCPYMQT_Interface  /* INTERFACE : MQT Interface table */
    }
} ;
#endif /* if defined (MSGQ_COMPONENT) */
/** ============================================================================
* @name ZSP800MMAP_Config
*
* @desc APU DRIVER configuration mapping structure.
* ============================================================================
*/
/* Top-level configuration mapping: counts must agree with the lengths
 * of the object arrays above.
 * NOTE(review): NUMPOOLS is hard-coded to 1, but ZSP800MMAP_PoolObjects
 * holds 2 entries when PCPY_LINK is defined — confirm BUFPOOL is meant
 * to be excluded in that configuration. */
CFGMAP_Object ZSP800MMAP_Config = {
    1,                                            /* NUMDSPS        : Number of types of DSPs */
    (CFGMAP_Dsp *) ZSP800MMAP_DspObjects,         /* DSPOBJECTS     : Array of DSP configuration mapping objects */
    1,                                            /* NUMLOADERS     : Number of types of DSP executable loaders */
    (CFGMAP_Loader *) ZSP800MMAP_LoaderObjects,   /* LOADERS        : Array of DSP executable loader configuration mapping objects */
#if (!defined (ONLY_PROC_COMPONENT))
    1,                                            /* NUMLINKDRVS    : Number of types of link drivers */
    (CFGMAP_LinkDrv *) ZSP800MMAP_LinkDrvObjects, /* LINKDRVOBJECTS : Array of Link Driver configuration mapping objects */
    1,                                            /* NUMIPS         : Number of types of IPS */
    (CFGMAP_Ips *) ZSP800MMAP_IpsObjects,         /* IPSOBJECTS     : Array of IPS configuration mapping objects */
#else
    0,                                            /* NUMLINKDRVS    : Number of types of link drivers */
    NULL,                                         /* LINKDRVOBJECTS : Array of Link Driver configuration mapping objects */
    0,                                            /* NUMIPS         : Number of types of IPS */
    NULL,                                         /* IPSOBJECTS     : Array of IPS configuration mapping objects */
#endif /* if (!defined (ONLY_PROC_COMPONENT)) */
#if defined (POOL_COMPONENT)
    1,                                            /* NUMPOOLS       : Number of types of POOLs */
    (CFGMAP_Pool *) ZSP800MMAP_PoolObjects,       /* POOLOBJECTS    : Array of POOL configuration mapping objects */
#endif /* if defined (POOL_COMPONENT) */
#if defined (CHNL_COMPONENT)
    1,                                            /* NUMDATADRIVERS : Number of types of Data drivers */
    (CFGMAP_DataDrv *) ZSP800MMAP_DataDrvObjects, /* DATADRIVERS    : Array of Data driver configuration mapping objects */
#endif /* if defined (CHNL_COMPONENT) */
#if defined (MSGQ_COMPONENT)
    1,                                            /* NUMMQTS        : Number of types of MQTs */
    (CFGMAP_Mqt *) ZSP800MMAP_MqtObjects          /* MQTOBJECTS     : Array of MQT configuration mapping objects */
#endif /* if defined (MSGQ_COMPONENT) */
} ;
#if defined (__cplusplus)
}
#endif /* defined (__cplusplus) */
| adamdmcbride/Nufront_linux_kernel | drivers/char/apu/gpp/src/arch/ZSP800M/zsp800m_map.c | C | gpl-2.0 | 9,567 |
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2012 ChromeOS Authors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <string.h>
#include <cbfs.h>
#include <cbmem.h>
#include <console/console.h>
#include <arch/cpu.h>
#include <cpu/x86/bist.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <halt.h>
#include <lib.h>
#include <timestamp.h>
#include <arch/io.h>
#include <arch/stages.h>
#include <device/pci_def.h>
#include <cpu/x86/lapic.h>
#include <cbfs.h>
#include <romstage_handoff.h>
#include <reset.h>
#include <stage_cache.h>
#include <vendorcode/google/chromeos/chromeos.h>
#if CONFIG_EC_GOOGLE_CHROMEEC
#include <ec/google/chromeec/ec.h>
#endif
#include "haswell.h"
#include "northbridge/intel/haswell/haswell.h"
#include "northbridge/intel/haswell/raminit.h"
#include "southbridge/intel/lynxpoint/pch.h"
#include "southbridge/intel/lynxpoint/me.h"
/* Issue a full hard reset; halt() is a backstop in case the reset
 * does not take effect immediately. */
static inline void reset_system(void)
{
	hard_reset();
	halt();
}
/* The cache-as-ram assembly file calls romstage_main() after setting up
* cache-as-ram. romstage_main() will then call the mainboards's
* mainboard_romstage_entry() function. That function then calls
* romstage_common() below. The reason for the back and forth is to provide
* common entry point from cache-as-ram while still allowing for code sharing.
* Because we can't use global variables the stack is used for allocations --
* thus the need to call back and forth. */
/* Push one 32-bit value onto a downward-growing stack and return the
 * new (lower) stack pointer. */
static inline u32 *stack_push(u32 *stack, u32 value)
{
	*--stack = value;
	return stack;
}
/* Romstage needs quite a bit of stack for decompressing images since the lzma
 * lib keeps its state on the stack during romstage. */
#define ROMSTAGE_RAM_STACK_SIZE 0x5000
/* Reserve the post-CAR romstage stack in cbmem and return the address
 * one past its end (stacks grow downward from the returned value). */
static unsigned long choose_top_of_stack(void)
{
	unsigned long stack_top;

	/* cbmem_add() does a find() before add(). */
	stack_top = (unsigned long)cbmem_add(CBMEM_ID_ROMSTAGE_RAM_STACK,
					     ROMSTAGE_RAM_STACK_SIZE);
	stack_top += ROMSTAGE_RAM_STACK_SIZE;
	return stack_top;
}
/* setup_romstage_stack_after_car() determines the stack to use after
 * cache-as-ram is torn down as well as the MTRR settings to use.
 * The MTRR program is encoded onto the new stack (count on top, then
 * base/mask pairs) for the CAR-teardown assembly to consume; the push
 * order below therefore must not be changed. */
static void *setup_romstage_stack_after_car(void)
{
	unsigned long top_of_stack;
	int num_mtrrs;
	u32 *slot;
	u32 mtrr_mask_upper;
	u32 top_of_ram;

	/* Top of stack needs to be aligned to a 4-byte boundary. */
	top_of_stack = choose_top_of_stack() & ~3;
	slot = (void *)top_of_stack;
	num_mtrrs = 0;

	/* The upper bits of the MTRR mask need to set according to the number
	 * of physical address bits. */
	mtrr_mask_upper = (1 << ((cpuid_eax(0x80000008) & 0xff) - 32)) - 1;

	/* The order for each MTRR is value then base with upper 32-bits of
	 * each value coming before the lower 32-bits. The reasoning for
	 * this ordering is to create a stack layout like the following:
	 *   +0: Number of MTRRs
	 *   +4: MTRR base 0 31:0
	 *   +8: MTRR base 0 63:32
	 *  +12: MTRR mask 0 31:0
	 *  +16: MTRR mask 0 63:32
	 *  +20: MTRR base 1 31:0
	 *  +24: MTRR base 1 63:32
	 *  +28: MTRR mask 1 31:0
	 *  +32: MTRR mask 1 63:32
	 */

	/* Cache the ROM as WP just below 4GiB. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
	num_mtrrs++;

	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	top_of_ram = (uint32_t)cbmem_top();
	/* Cache 8MiB below the top of ram. On haswell systems the top of
	 * ram under 4GiB is the start of the TSEG region. It is required to
	 * be 8MiB aligned. Set this area as cacheable so it can be used later
	 * for ramstage before setting up the entire RAM as cacheable. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Cache 8MiB at the top of ram. Top of ram on haswell systems
	 * is where the TSEG region resides. However, it is not restricted
	 * to SMM mode until SMM has been relocated. By setting the region
	 * to cacheable it provides faster access when relocating the SMM
	 * handler as well as using the TSEG region for other purposes. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Save the number of MTRRs to setup. Return the stack location
	 * pointing to the number of MTRRs. */
	slot = stack_push(slot, num_mtrrs);

	return slot;
}
/*
 * Romstage C entry point, executed while cache-as-ram is still active.
 *
 * Places guard words at the low end of the CAR stack so an overflow by the
 * board-specific romstage can be detected, runs the mainboard romstage,
 * checks the guards, and returns the stack pointer to switch to once
 * cache-as-ram is torn down.
 */
void * asmlinkage romstage_main(unsigned long bist)
{
	const int guard_words = 4;
	const u32 guard_value = 0xdeadbeef;
	u32 *guard;
	int idx;

	guard = (void *)(CONFIG_DCACHE_RAM_BASE +
			 CONFIG_DCACHE_RAM_SIZE -
			 CONFIG_DCACHE_RAM_ROMSTAGE_STACK_SIZE);

	printk(BIOS_DEBUG, "Setting up stack guards.\n");
	for (idx = 0; idx < guard_words; idx++)
		guard[idx] = guard_value;

	mainboard_romstage_entry(bist);

	/* Verify the guard words survived the mainboard romstage. */
	for (idx = 0; idx < guard_words; idx++) {
		if (guard[idx] != guard_value)
			printk(BIOS_DEBUG, "Smashed stack detected in romstage!\n");
	}

	/* Hand back the stack to use after cache-as-ram is torn down. */
	return setup_romstage_stack_after_car();
}
/*
 * Common romstage flow shared by haswell boards: early LAPIC/PCH bring-up,
 * DRAM init via the MRC binary, CBMEM setup and S3-resume handling.  The
 * board supplies its specifics (GPIO map, RCBA config, SPD hook) through
 * 'params'.
 */
void romstage_common(const struct romstage_params *params)
{
int boot_mode;
int wake_from_s3;
struct romstage_handoff *handoff;
timestamp_init(get_initial_timestamp());
timestamp_add_now(TS_START_ROMSTAGE);
if (params->bist == 0)
enable_lapic();
/* early_pch_init() reports whether this boot is an S3 wake. */
wake_from_s3 = early_pch_init(params->gpio_map, params->rcba_config);
#if CONFIG_EC_GOOGLE_CHROMEEC
/* Ensure the EC is in the right mode for recovery */
google_chromeec_early_init();
#endif
/* Halt if there was a built in self test failure */
report_bist_failure(params->bist);
/* Perform some early chipset initialization required
 * before RAM initialization can work
 */
haswell_early_initialization(HASWELL_MOBILE);
printk(BIOS_DEBUG, "Back from haswell_early_initialization()\n");
/* Downgrade an S3 wake to a normal boot when resume support is off. */
if (wake_from_s3) {
#if CONFIG_HAVE_ACPI_RESUME
printk(BIOS_DEBUG, "Resume from S3 detected.\n");
#else
printk(BIOS_DEBUG, "Resume from S3 detected, but disabled.\n");
wake_from_s3 = 0;
#endif
}
/* There are hard coded assumptions of 2 meaning s3 wake. Normalize
 * the users of the 2 literal here based off wake_from_s3. */
boot_mode = wake_from_s3 ? 2 : 0;
/* Prepare USB controller early in S3 resume */
if (wake_from_s3)
enable_usb_bar();
post_code(0x3a);
params->pei_data->boot_mode = boot_mode;
timestamp_add_now(TS_BEFORE_INITRAM);
report_platform_info();
/* Let the board hand SPD data to the MRC if it provides a hook. */
if (params->copy_spd != NULL)
params->copy_spd(params->pei_data);
sdram_initialize(params->pei_data);
timestamp_add_now(TS_AFTER_INITRAM);
post_code(0x3b);
intel_early_me_status();
quick_ram_check();
post_code(0x3e);
if (!wake_from_s3) {
cbmem_initialize_empty();
stage_cache_create_empty();
/* Save data returned from MRC on non-S3 resumes. */
save_mrc_data(params->pei_data);
} else {
stage_cache_recover();
if (cbmem_initialize()) {
#if CONFIG_HAVE_ACPI_RESUME
/* Failed S3 resume, reset to come up cleanly */
reset_system();
#endif
}
}
/* Record the resume decision for later stages via the handoff struct. */
handoff = romstage_handoff_find_or_add();
if (handoff != NULL)
handoff->s3_resume = wake_from_s3;
else
printk(BIOS_DEBUG, "Romstage handoff structure not added!\n");
post_code(0x3f);
#if CONFIG_CHROMEOS
init_chromeos(boot_mode);
#endif
timestamp_add_now(TS_END_ROMSTAGE);
}
/*
 * On S3 resume with a non-relocatable ramstage, loading ramstage at
 * CONFIG_RAMBASE clobbers OS-owned memory; back that region up into the
 * CBMEM_ID_RESUME area first so it can be restored before the OS resumes.
 * No-op when ramstage is relocatable or ACPI resume is disabled.
 */
static inline void prepare_for_resume(struct romstage_handoff *handoff)
{
/* Only need to save memory when ramstage isn't relocatable. */
#if !CONFIG_RELOCATABLE_RAMSTAGE
#if CONFIG_HAVE_ACPI_RESUME
/* Back up the OS-controlled memory where ramstage will be loaded. */
if (handoff != NULL && handoff->s3_resume) {
void *src = (void *)CONFIG_RAMBASE;
void *dest = cbmem_find(CBMEM_ID_RESUME);
if (dest != NULL)
memcpy(dest, src, HIGH_MEMORY_SAVE);
}
#endif
#endif
}
/*
 * Continue romstage on the post-CAR stack: save any memory needed for S3
 * resume, then load and jump into ramstage (does not return).
 */
void romstage_after_car(void)
{
	prepare_for_resume(romstage_handoff_find_or_add());

	/* Load the ramstage. */
	copy_and_run();
}
#if IS_ENABLED(CONFIG_CACHE_RELOCATED_RAMSTAGE_OUTSIDE_CBMEM)
/*
 * Report where the external (outside-CBMEM) stage cache lives.  It sits
 * in the TSEG region at RESERVED_SMM_OFFSET; cbmem_top() is defined to be
 * the TSEG base address on these platforms.
 */
void stage_cache_external_region(void **base, size_t *size)
{
	uint32_t tseg_base = (uint32_t)cbmem_top();

	*base = (void *)(tseg_base + RESERVED_SMM_OFFSET);
	*size = RESERVED_SMM_SIZE;
}
/*
 * Called when the cached ramstage turns out to be corrupt.  Optionally
 * reset the system so a fresh (non-resume) boot repopulates the cache;
 * otherwise just return and let the caller fall back.
 */
void ramstage_cache_invalid(void)
{
#if CONFIG_RESET_ON_INVALID_RAMSTAGE_CACHE
reset_system();
#endif
}
#endif
| coreboot-gs45/coreboot | src/cpu/intel/haswell/romstage.c | C | gpl-2.0 | 9,829 |
/*
SDL - Simple DirectMedia Layer
Copyright (C) 1997-2012 Sam Lantinga
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Sam Lantinga
slouken@libsdl.org
*/
#include "SDL_config.h"
#include "SDL_mouse.h"
#include "../../events/SDL_events_c.h"
#include "SDL_vglvideo.h"
#include "SDL_vglmouse_c.h"
/* Dummy window-manager cursor handle: the VGL backend has no WM cursor
 * support, so no state is needed.  The lone member only keeps the struct
 * non-empty, as required by standard C. */
struct WMcursor {
int unused;
};
/* Release a WM cursor.  VGL_CreateWMCursor() never allocates one, so
 * there is nothing to free here. */
void VGL_FreeWMCursor(_THIS, WMcursor *cursor)
{
}
/* Hardware/WM cursors are unsupported on VGL; always report failure so
 * SDL falls back to its software cursor implementation. */
WMcursor *VGL_CreateWMCursor(_THIS,
		Uint8 *data, Uint8 *mask, int w, int h, int hot_x, int hot_y)
{
	return NULL;
}
/* No WM cursor to show or hide; return 0 ("not handled") so SDL keeps
 * using the software cursor. */
int VGL_ShowWMCursor(_THIS, WMcursor *cursor)
{
	return 0;
}
/* Warp the mouse by injecting an absolute (non-relative) motion event at
 * the requested position. */
void VGL_WarpWMCursor(_THIS, Uint16 x, Uint16 y)
{
	SDL_PrivateMouseMotion(0, 0, x, y);
}
| qtekfun/htcDesire820Kernel | external/qemu/distrib/sdl-1.2.15/src/video/vgl/SDL_vglmouse.c | C | gpl-2.0 | 1,382 |
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
This file is part of systemd.
Copyright 2010 Lennart Poettering
systemd is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
systemd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "sd-id128.h"
#include "sd-messages.h"
#include "alloc-util.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "escape.h"
#include "execute.h"
#include "fileio-label.h"
#include "formats-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "macro.h"
#include "missing.h"
#include "mkdir.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "set.h"
#include "special.h"
#include "stat-util.h"
#include "string-util.h"
#include "strv.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"
/* Dispatch table mapping each UnitType to the vtable that implements it;
 * indexed everywhere via the UNIT_VTABLE() macro. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
[UNIT_SERVICE] = &service_vtable,
[UNIT_SOCKET] = &socket_vtable,
[UNIT_BUSNAME] = &busname_vtable,
[UNIT_TARGET] = &target_vtable,
[UNIT_DEVICE] = &device_vtable,
[UNIT_MOUNT] = &mount_vtable,
[UNIT_AUTOMOUNT] = &automount_vtable,
[UNIT_SWAP] = &swap_vtable,
[UNIT_TIMER] = &timer_vtable,
[UNIT_PATH] = &path_vtable,
[UNIT_SLICE] = &slice_vtable,
[UNIT_SCOPE] = &scope_vtable
};
static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
/*
 * Allocate and minimally initialize a Unit object of 'size' bytes
 * (size >= sizeof(Unit); type-specific structs embed Unit at offset 0).
 * The unit starts with no type, no names and default settings.  Returns
 * NULL on allocation failure.
 */
Unit *unit_new(Manager *m, size_t size) {
Unit *u;
assert(m);
assert(size >= sizeof(Unit));
u = malloc0(size);
if (!u)
return NULL;
u->names = set_new(&string_hash_ops);
if (!u->names) {
free(u);
return NULL;
}
u->manager = m;
u->type = _UNIT_TYPE_INVALID;
u->default_dependencies = true;
u->unit_file_state = _UNIT_FILE_STATE_INVALID;
u->unit_file_preset = -1;
u->on_failure_job_mode = JOB_REPLACE;
u->cgroup_inotify_wd = -1;
/* Rate-limit automatic stops: at most 16 within any 10-second window. */
RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
return u;
}
/* Check whether 'name' is one of the unit's registered names (primary id
 * or alias). */
bool unit_has_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_get(u->names, (char *) name) != NULL;
}
/*
 * Second-stage initialization, run once the unit's type is known (from
 * unit_add_name()): seed the cgroup/exec/kill contexts the type provides
 * and invoke the type's own init hook.
 */
static void unit_init(Unit *u) {
CGroupContext *cc;
ExecContext *ec;
KillContext *kc;
assert(u);
assert(u->manager);
assert(u->type >= 0);
cc = unit_get_cgroup_context(u);
if (cc) {
cgroup_context_init(cc);
/* Copy in the manager defaults into the cgroup
 * context, _before_ the rest of the settings have
 * been initialized */
cc->cpu_accounting = u->manager->default_cpu_accounting;
cc->blockio_accounting = u->manager->default_blockio_accounting;
cc->memory_accounting = u->manager->default_memory_accounting;
cc->tasks_accounting = u->manager->default_tasks_accounting;
}
ec = unit_get_exec_context(u);
if (ec)
exec_context_init(ec);
kc = unit_get_kill_context(u);
if (kc)
kill_context_init(kc);
if (UNIT_VTABLE(u)->init)
UNIT_VTABLE(u)->init(u);
}
/*
 * Register an additional name for the unit.  Template names are first
 * instantiated with the unit's instance string.  The first name ever
 * added determines the unit's type, primary id and instance.  Returns 0
 * if the name is already present on this unit, -EEXIST if taken by
 * another unit (or aliasing is not allowed), -EINVAL on name/type
 * mismatches, other negative errno on failure.
 */
int unit_add_name(Unit *u, const char *text) {
_cleanup_free_ char *s = NULL, *i = NULL;
UnitType t;
int r;
assert(u);
assert(text);
if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
if (!u->instance)
return -EINVAL;
r = unit_name_replace_instance(text, u->instance, &s);
if (r < 0)
return r;
} else {
s = strdup(text);
if (!s)
return -ENOMEM;
}
if (set_contains(u->names, s))
return 0;
if (hashmap_contains(u->manager->units, s))
return -EEXIST;
if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
return -EINVAL;
t = unit_name_to_type(s);
if (t < 0)
return -EINVAL;
if (u->type != _UNIT_TYPE_INVALID && t != u->type)
return -EINVAL;
r = unit_name_to_instance(s, &i);
if (r < 0)
return r;
if (i && unit_vtable[t]->no_instances)
return -EINVAL;
/* Ensure that this unit is either instanced or not instanced,
 * but not both. Note that we do allow names with different
 * instance names however! */
if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
return -EINVAL;
if (unit_vtable[t]->no_alias && !set_isempty(u->names))
return -EEXIST;
if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
return -E2BIG;
r = set_put(u->names, s);
if (r < 0)
return r;
assert(r > 0);
/* Register the name in the manager's global unit table; roll back the
 * set insertion if that fails. */
r = hashmap_put(u->manager->units, s, u);
if (r < 0) {
(void) set_remove(u->names, s);
return r;
}
if (u->type == _UNIT_TYPE_INVALID) {
/* First name: it fixes the unit's type, id and instance. */
u->type = t;
u->id = s;
u->instance = i;
LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
unit_init(u);
i = NULL;
}
/* Ownership of s has moved into u->names / the units hashmap (and
 * possibly i into u->instance); disarm the _cleanup_free_ handlers. */
s = NULL;
unit_add_to_dbus_queue(u);
return 0;
}
/*
 * Switch the unit's primary id to one of its already-registered names
 * (instantiating a template name first if needed) and re-derive the
 * instance string from it.  Returns -ENOENT if the name is not one of
 * the unit's names.
 */
int unit_choose_id(Unit *u, const char *name) {
_cleanup_free_ char *t = NULL;
char *s, *i;
int r;
assert(u);
assert(name);
if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
if (!u->instance)
return -EINVAL;
r = unit_name_replace_instance(name, u->instance, &t);
if (r < 0)
return r;
name = t;
}
/* Selects one of the names of this unit as the id */
s = set_get(u->names, (char*) name);
if (!s)
return -ENOENT;
/* Determine the new instance from the new id */
r = unit_name_to_instance(s, &i);
if (r < 0)
return r;
/* s is owned by u->names, so no string is copied or freed here. */
u->id = s;
free(u->instance);
u->instance = i;
unit_add_to_dbus_queue(u);
return 0;
}
/*
 * Replace the unit's human-readable description.  An empty or NULL
 * description clears it.  Returns 0 on success, -ENOMEM on failure
 * (leaving the old description untouched).
 */
int unit_set_description(Unit *u, const char *description) {
        char *copy = NULL;

        assert(u);

        if (!isempty(description)) {
                copy = strdup(description);
                if (!copy)
                        return -ENOMEM;
        }

        free(u->description);
        u->description = copy;

        unit_add_to_dbus_queue(u);
        return 0;
}
/*
 * Decide whether the unit object must be kept around: true if a job is
 * pending, the unit is not fully inactive, it is referenced, or gc is
 * disabled for it.  As a side effect, an inactive-or-failed unit with no
 * job gets its runtime resources released here.
 */
bool unit_check_gc(Unit *u) {
UnitActiveState state;
assert(u);
if (u->job)
return true;
if (u->nop_job)
return true;
state = unit_active_state(u);
/* If the unit is inactive and failed and no job is queued for
 * it, then release its runtime resources */
if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
UNIT_VTABLE(u)->release_resources)
UNIT_VTABLE(u)->release_resources(u);
/* But we keep the unit object around for longer when it is
 * referenced or configured to not be gc'ed */
if (state != UNIT_INACTIVE)
return true;
if (UNIT_VTABLE(u)->no_gc)
return true;
if (u->no_gc)
return true;
if (u->refs)
return true;
/* Give the unit type a final say (e.g. still watching something). */
if (UNIT_VTABLE(u)->check_gc)
if (UNIT_VTABLE(u)->check_gc(u))
return true;
return false;
}
/* Queue a still-unloaded (STUB) unit for loading; idempotent. */
void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->in_load_queue)
                return;
        if (u->load_state != UNIT_STUB)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}
/* Queue the unit for final cleanup/freeing; idempotent. */
void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        u->in_cleanup_queue = true;
        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
}
/* Queue the unit for garbage collection, unless it is already queued for
 * gc/cleanup or unit_check_gc() says it must be kept. */
void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;
        if (unit_check_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
        u->in_gc_queue = true;
        u->manager->n_in_gc_queue++;
}
/*
 * Queue the unit for a D-Bus change notification; idempotent.  When
 * nobody is subscribed (no bus clients, no private buses), the signal is
 * marked as sent without queuing any work.
 */
void unit_add_to_dbus_queue(Unit *u) {
assert(u);
assert(u->type != _UNIT_TYPE_INVALID);
if (u->load_state == UNIT_STUB || u->in_dbus_queue)
return;
/* Shortcut things if nobody cares */
if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
set_isempty(u->manager->private_buses)) {
u->sent_dbus_new_signal = true;
return;
}
LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
u->in_dbus_queue = true;
}
/*
 * Free one of u's dependency sets 's', first removing u from every
 * reverse-dependency set of the units it pointed to (dependencies are
 * stored bidirectionally).  Each peer is re-queued for gc since losing a
 * reference may make it collectable.
 */
static void bidi_set_free(Unit *u, Set *s) {
Iterator i;
Unit *other;
assert(u);
/* Frees the set and makes sure we are dropped from the
 * inverse pointers */
SET_FOREACH(other, s, i) {
UnitDependency d;
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
set_remove(other->dependencies[d], u);
unit_add_to_gc_queue(other);
}
set_free(s);
}
/*
 * Delete the on-disk remnants of a transient unit: its fragment file and
 * all drop-in files, plus the (now hopefully empty) drop-in directories.
 * Failures are ignored on purpose — this is best-effort cleanup.
 */
static void unit_remove_transient(Unit *u) {
char **i;
assert(u);
if (!u->transient)
return;
if (u->fragment_path)
(void) unlink(u->fragment_path);
STRV_FOREACH(i, u->dropin_paths) {
_cleanup_free_ char *p = NULL;
(void) unlink(*i);
/* rmdir only succeeds if the drop-in dir is empty, which is
 * exactly what we want here. */
p = dirname_malloc(*i);
if (p)
(void) rmdir(p);
}
}
/*
 * Drop the unit from the manager's units_requiring_mounts_for index (one
 * entry per path prefix of every RequiresMountsFor= path) and free the
 * path list itself.  Empty index sets are removed together with their
 * owned key string.
 */
static void unit_free_requires_mounts_for(Unit *u) {
char **j;
STRV_FOREACH(j, u->requires_mounts_for) {
char s[strlen(*j) + 1];
PATH_FOREACH_PREFIX_MORE(s, *j) {
char *y;
Set *x;
x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
if (!x)
continue;
set_remove(x, u);
if (set_isempty(x)) {
/* Last unit for this prefix: drop the whole entry. */
hashmap_remove(u->manager->units_requiring_mounts_for, y);
free(y);
set_free(x);
}
}
}
u->requires_mounts_for = strv_free(u->requires_mounts_for);
}
/*
 * Tear down type-specific unit state: run the vtable's done hook, release
 * the exec/cgroup contexts and detach the unit from its netclass cgroup.
 * Safe to call on a unit whose type was never established.
 */
static void unit_done(Unit *u) {
ExecContext *ec;
CGroupContext *cc;
int r;
assert(u);
if (u->type < 0)
return;
if (UNIT_VTABLE(u)->done)
UNIT_VTABLE(u)->done(u);
ec = unit_get_exec_context(u);
if (ec)
exec_context_done(ec);
cc = unit_get_cgroup_context(u);
if (cc)
cgroup_context_done(cc);
/* Best-effort: log but continue if the netclass detach fails. */
r = unit_remove_from_netclass_cgroup(u);
if (r < 0)
log_warning_errno(r, "Unable to remove unit from netclass group: %m");
}
/*
 * Fully destroy a unit: announce removal on the bus, run type teardown,
 * unregister every name, cancel pending jobs, detach bidirectional
 * dependencies, remove the unit from all manager queues and free all
 * owned memory.  Transient unit files are deleted unless a daemon reload
 * is in progress (their state must survive the reload).
 */
void unit_free(Unit *u) {
UnitDependency d;
Iterator i;
char *t;
assert(u);
if (u->manager->n_reloading <= 0)
unit_remove_transient(u);
bus_unit_send_removed_signal(u);
unit_done(u);
sd_bus_slot_unref(u->match_bus_slot);
unit_free_requires_mounts_for(u);
SET_FOREACH(t, u->names, i)
hashmap_remove_value(u->manager->units, t, u);
if (u->job) {
Job *j = u->job;
job_uninstall(j);
job_free(j);
}
if (u->nop_job) {
Job *j = u->nop_job;
job_uninstall(j);
job_free(j);
}
/* Detach from peers' reverse-dependency sets and free our own. */
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
bidi_set_free(u, u->dependencies[d]);
if (u->type != _UNIT_TYPE_INVALID)
LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
/* Unlink from every manager work queue we may still sit on. */
if (u->in_load_queue)
LIST_REMOVE(load_queue, u->manager->load_queue, u);
if (u->in_dbus_queue)
LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
if (u->in_cleanup_queue)
LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
if (u->in_gc_queue) {
LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
u->manager->n_in_gc_queue--;
}
if (u->in_cgroup_queue)
LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
unit_release_cgroup(u);
(void) manager_update_failed_units(u->manager, u, false);
set_remove(u->manager->startup_units, u);
free(u->description);
strv_free(u->documentation);
free(u->fragment_path);
free(u->source_path);
strv_free(u->dropin_paths);
free(u->instance);
free(u->job_timeout_reboot_arg);
/* u->id points into u->names, so freeing the set frees it too. */
set_free_free(u->names);
unit_unwatch_all_pids(u);
condition_free_list(u->conditions);
condition_free_list(u->asserts);
unit_ref_unset(&u->slice);
/* Clear any remaining external references to us before freeing. */
while (u->refs)
unit_ref_unset(u->refs);
free(u);
}
/*
 * Report the unit's active state, following merge redirection first.
 * A load failure deliberately does not short-circuit to
 * UNIT_INACTIVE_FAILED: after a reload a badly loaded unit may still
 * have live processes, so the type's own state callback decides.
 */
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                u = unit_follow_merge(u);

        return UNIT_VTABLE(u)->active_state(u);
}
/* Human-readable, type-specific sub-state string (delegated to the
 * unit type's vtable). */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);
        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
/*
 * Move every entry of *other into *s.  When *s does not exist yet the
 * other set is simply stolen wholesale (and *other cleared); otherwise
 * set_move() transfers the elements.  Returns 0 on success, <0 on error.
 */
static int complete_move(Set **s, Set **other) {
        int r;

        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (!*s) {
                /* Nothing on our side yet: just take over the other set. */
                *s = *other;
                *other = NULL;
                return 0;
        }

        r = set_move(*s, *other);
        if (r < 0)
                return r;

        return 0;
}
/*
 * Transfer all of other's names to u and repoint the manager's global
 * unit table entries for those names at u.  After this, other has no
 * names and no id.
 */
static int merge_names(Unit *u, Unit *other) {
char *t;
Iterator i;
int r;
assert(u);
assert(other);
r = complete_move(&u->names, &other->names);
if (r < 0)
return r;
/* Whatever is left in other->names was moved out, so freeing the set
 * here releases only the (now empty) container. */
set_free_free(other->names);
other->names = NULL;
other->id = NULL;
/* hashmap_replace() == 0 because every name is already a key pointing
 * at some unit; we only swap the value to u. */
SET_FOREACH(t, u->names, i)
assert_se(hashmap_replace(u->manager->units, t, u) == 0);
return 0;
}
/*
 * Pre-reserve room in u's dependency set of kind 'd' for all entries that
 * merge_dependencies() will move over from other, so that the later move
 * cannot fail with OOM.
 */
static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
unsigned n_reserve;
assert(u);
assert(other);
assert(d < _UNIT_DEPENDENCY_MAX);
/*
 * If u does not have this dependency set allocated, there is no need
 * to reserve anything. In that case other's set will be transferred
 * as a whole to u by complete_move().
 */
if (!u->dependencies[d])
return 0;
/* merge_dependencies() will skip a u-on-u dependency */
n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
return set_reserve(u->dependencies[d], n_reserve);
}
/*
 * Move other's dependencies of kind 'd' over to u, fixing up the reverse
 * pointers in every peer unit's dependency sets, and dropping any
 * dependency that would make u depend on itself.  'other_id' is other's
 * (saved) id, used only for warning messages.  Must be preceded by
 * reserve_dependencies() so the final move cannot fail.
 */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
Iterator i;
Unit *back;
int r;
assert(u);
assert(other);
assert(d < _UNIT_DEPENDENCY_MAX);
/* Fix backwards pointers */
SET_FOREACH(back, other->dependencies[d], i) {
UnitDependency k;
for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
/* Do not add dependencies between u and itself */
if (back == u) {
if (set_remove(back->dependencies[k], other))
maybe_warn_about_dependency(u, other_id, k);
} else {
/* Repoint back's reference from other to u; -EEXIST means
 * back already referenced u, so just drop other. */
r = set_remove_and_put(back->dependencies[k], other, u);
if (r == -EEXIST)
set_remove(back->dependencies[k], other);
else
assert(r >= 0 || r == -ENOENT);
}
}
}
/* Also do not move dependencies on u to itself */
back = set_remove(other->dependencies[d], u);
if (back)
maybe_warn_about_dependency(u, other_id, d);
/* The move cannot fail. The caller must have performed a reservation. */
assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
other->dependencies[d] = set_free(other->dependencies[d]);
}
/*
 * Merge 'other' into 'u': transfer names, references and dependencies,
 * then mark other as UNIT_MERGED pointing at u and queue it for cleanup.
 * Only allowed when other is still a stub (or not found), inactive, and
 * job-free; both units must be of the same type.  Returns 0 on success
 * (including the trivial other == u case).
 */
int unit_merge(Unit *u, Unit *other) {
UnitDependency d;
const char *other_id = NULL;
int r;
assert(u);
assert(other);
assert(u->manager == other->manager);
assert(u->type != _UNIT_TYPE_INVALID);
other = unit_follow_merge(other);
if (other == u)
return 0;
if (u->type != other->type)
return -EINVAL;
if (!u->instance != !other->instance)
return -EINVAL;
if (other->load_state != UNIT_STUB &&
other->load_state != UNIT_NOT_FOUND)
return -EEXIST;
if (other->job)
return -EEXIST;
if (other->nop_job)
return -EEXIST;
if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
return -EEXIST;
/* Stack copy: other->id's storage is freed during merge_names(). */
if (other->id)
other_id = strdupa(other->id);
/* Make reservations to ensure merge_dependencies() won't fail */
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
r = reserve_dependencies(u, other, d);
/*
 * We don't rollback reservations if we fail. We don't have
 * a way to undo reservations. A reservation is not a leak.
 */
if (r < 0)
return r;
}
/* Merge names */
r = merge_names(u, other);
if (r < 0)
return r;
/* Redirect all references */
while (other->refs)
unit_ref_set(other->refs, u);
/* Merge dependencies */
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
merge_dependencies(u, other, other_id, d);
other->load_state = UNIT_MERGED;
other->merged_into = u;
/* If there is still some data attached to the other node, we
 * don't need it anymore, and can free it.
 * NOTE(review): load_state was just set to UNIT_MERGED above, so this
 * condition is always true and done() runs unconditionally — confirm
 * whether the check was meant against the pre-merge state. */
if (other->load_state != UNIT_STUB)
if (UNIT_VTABLE(other)->done)
UNIT_VTABLE(other)->done(other);
unit_add_to_dbus_queue(u);
unit_add_to_cleanup_queue(other);
return 0;
}
/*
 * Merge the unit known under 'name' into u; if no such unit exists yet,
 * simply register 'name' as an additional name of u.  Template names are
 * instantiated with u's instance string first.
 */
int unit_merge_by_name(Unit *u, const char *name) {
Unit *other;
int r;
_cleanup_free_ char *s = NULL;
assert(u);
assert(name);
if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
if (!u->instance)
return -EINVAL;
r = unit_name_replace_instance(name, u->instance, &s);
if (r < 0)
return r;
name = s;
}
other = manager_get_unit(u->manager, name);
if (other)
return unit_merge(u, other);
return unit_add_name(u, name);
}
/* Resolve a possibly-merged unit to the unit it was ultimately merged
 * into (identity for units that were never merged). */
Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED) {
                Unit *next = u->merged_into;

                assert_se(next);
                u = next;
        }

        return u;
}
/*
 * Derive implicit dependencies from an ExecContext: mount requirements
 * for WorkingDirectory=/RootDirectory= (and /tmp + /var/tmp when
 * PrivateTmp= is on), and an After= on the journal socket whenever
 * stdout/stderr go to syslog/kmsg/journal.  The latter only applies to
 * the system instance.
 */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
int r;
assert(u);
assert(c);
if (c->working_directory) {
r = unit_require_mounts_for(u, c->working_directory);
if (r < 0)
return r;
}
if (c->root_directory) {
r = unit_require_mounts_for(u, c->root_directory);
if (r < 0)
return r;
}
if (u->manager->running_as != MANAGER_SYSTEM)
return 0;
if (c->private_tmp) {
r = unit_require_mounts_for(u, "/tmp");
if (r < 0)
return r;
r = unit_require_mounts_for(u, "/var/tmp");
if (r < 0)
return r;
}
/* Nothing more to do unless some output stream targets the journal,
 * syslog or kmsg. */
if (c->std_output != EXEC_OUTPUT_KMSG &&
c->std_output != EXEC_OUTPUT_SYSLOG &&
c->std_output != EXEC_OUTPUT_JOURNAL &&
c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
c->std_error != EXEC_OUTPUT_KMSG &&
c->std_error != EXEC_OUTPUT_SYSLOG &&
c->std_error != EXEC_OUTPUT_JOURNAL &&
c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
return 0;
/* If syslog or kernel logging is requested, make sure our own
 * logging daemon is run first. */
r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
if (r < 0)
return r;
return 0;
}
/* Human-readable description of the unit, falling back to its id (or
 * "n/a" via strna() when even the id is unset). */
const char *unit_description(Unit *u) {
        assert(u);

        return u->description ? u->description : strna(u->id);
}
/*
 * Write a full multi-line debug dump of the unit's state to 'f', each
 * line prefixed with 'prefix'.  Covers identity, timestamps, cgroup
 * state, names, paths, conditions/asserts, all dependencies, per-type
 * details (via the vtable's dump hook) and any pending jobs.
 */
void unit_dump(Unit *u, FILE *f, const char *prefix) {
char *t, **j;
UnitDependency d;
Iterator i;
const char *prefix2;
char
timestamp1[FORMAT_TIMESTAMP_MAX],
timestamp2[FORMAT_TIMESTAMP_MAX],
timestamp3[FORMAT_TIMESTAMP_MAX],
timestamp4[FORMAT_TIMESTAMP_MAX],
timespan[FORMAT_TIMESPAN_MAX];
Unit *following;
_cleanup_set_free_ Set *following_set = NULL;
int r;
assert(u);
assert(u->type >= 0);
prefix = strempty(prefix);
/* prefix2 = prefix + one extra tab, for nested (per-type/job) dumps. */
prefix2 = strjoina(prefix, "\t");
fprintf(f,
"%s-> Unit %s:\n"
"%s\tDescription: %s\n"
"%s\tInstance: %s\n"
"%s\tUnit Load State: %s\n"
"%s\tUnit Active State: %s\n"
"%s\tInactive Exit Timestamp: %s\n"
"%s\tActive Enter Timestamp: %s\n"
"%s\tActive Exit Timestamp: %s\n"
"%s\tInactive Enter Timestamp: %s\n"
"%s\tGC Check Good: %s\n"
"%s\tNeed Daemon Reload: %s\n"
"%s\tTransient: %s\n"
"%s\tSlice: %s\n"
"%s\tCGroup: %s\n"
"%s\tCGroup realized: %s\n"
"%s\tCGroup mask: 0x%x\n"
"%s\tCGroup members mask: 0x%x\n",
prefix, u->id,
prefix, unit_description(u),
prefix, strna(u->instance),
prefix, unit_load_state_to_string(u->load_state),
prefix, unit_active_state_to_string(unit_active_state(u)),
prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
prefix, yes_no(unit_check_gc(u)),
prefix, yes_no(unit_need_daemon_reload(u)),
prefix, yes_no(u->transient),
prefix, strna(unit_slice_name(u)),
prefix, strna(u->cgroup_path),
prefix, yes_no(u->cgroup_realized),
prefix, u->cgroup_realized_mask,
prefix, u->cgroup_members_mask);
SET_FOREACH(t, u->names, i)
fprintf(f, "%s\tName: %s\n", prefix, t);
STRV_FOREACH(j, u->documentation)
fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
following = unit_following(u);
if (following)
fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
r = unit_following_set(u, &following_set);
if (r >= 0) {
Unit *other;
SET_FOREACH(other, following_set, i)
fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
}
if (u->fragment_path)
fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
if (u->source_path)
fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
STRV_FOREACH(j, u->dropin_paths)
fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
if (u->job_timeout > 0)
fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
if (u->job_timeout_action != FAILURE_ACTION_NONE)
fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
if (u->job_timeout_reboot_arg)
fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
if (dual_timestamp_is_set(&u->condition_timestamp))
fprintf(f,
"%s\tCondition Timestamp: %s\n"
"%s\tCondition Result: %s\n",
prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
prefix, yes_no(u->condition_result));
if (dual_timestamp_is_set(&u->assert_timestamp))
fprintf(f,
"%s\tAssert Timestamp: %s\n"
"%s\tAssert Result: %s\n",
prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
prefix, yes_no(u->assert_result));
/* One line per dependency edge, grouped by dependency kind. */
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
Unit *other;
SET_FOREACH(other, u->dependencies[d], i)
fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
}
if (!strv_isempty(u->requires_mounts_for)) {
fprintf(f,
"%s\tRequiresMountsFor:", prefix);
STRV_FOREACH(j, u->requires_mounts_for)
fprintf(f, " %s", *j);
fputs("\n", f);
}
if (u->load_state == UNIT_LOADED) {
fprintf(f,
"%s\tStopWhenUnneeded: %s\n"
"%s\tRefuseManualStart: %s\n"
"%s\tRefuseManualStop: %s\n"
"%s\tDefaultDependencies: %s\n"
"%s\tOnFailureJobMode: %s\n"
"%s\tIgnoreOnIsolate: %s\n",
prefix, yes_no(u->stop_when_unneeded),
prefix, yes_no(u->refuse_manual_start),
prefix, yes_no(u->refuse_manual_stop),
prefix, yes_no(u->default_dependencies),
prefix, job_mode_to_string(u->on_failure_job_mode),
prefix, yes_no(u->ignore_on_isolate));
if (UNIT_VTABLE(u)->dump)
UNIT_VTABLE(u)->dump(u, f, prefix2);
} else if (u->load_state == UNIT_MERGED)
fprintf(f,
"%s\tMerged into: %s\n",
prefix, u->merged_into->id);
else if (u->load_state == UNIT_ERROR)
fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
if (u->job)
job_dump(u->job, f, prefix2);
if (u->nop_job)
job_dump(u->nop_job, f, prefix2);
}
/* Common implementation for multiple backends */
/* Common implementation for multiple backends: load the unit's fragment
 * file and its drop-in directories.  Fails with -ENOENT if no fragment
 * was found (the unit remains a stub). */
int unit_load_fragment_and_dropin(Unit *u) {
int r;
assert(u);
/* Load a .{service,socket,...} file */
r = unit_load_fragment(u);
if (r < 0)
return r;
if (u->load_state == UNIT_STUB)
return -ENOENT;
/* Load drop-in directory data */
r = unit_load_dropin(unit_follow_merge(u));
if (r < 0)
return r;
return 0;
}
/* Common implementation for multiple backends */
/* Common implementation for multiple backends: like
 * unit_load_fragment_and_dropin(), but a missing fragment is fine — the
 * unit is promoted to UNIT_LOADED anyway (drop-ins alone may configure it). */
int unit_load_fragment_and_dropin_optional(Unit *u) {
int r;
assert(u);
/* Same as unit_load_fragment_and_dropin(), but whether
 * something can be loaded or not doesn't matter. */
/* Load a .service file */
r = unit_load_fragment(u);
if (r < 0)
return r;
if (u->load_state == UNIT_STUB)
u->load_state = UNIT_LOADED;
/* Load drop-in directory data */
r = unit_load_dropin(unit_follow_merge(u));
if (r < 0)
return r;
return 0;
}
/*
 * For a unit pulled in by a target: add the implicit target-After-unit
 * ordering, unless either side disabled default dependencies, either
 * side is not fully loaded, or the ordering would create a loop (the
 * target is already Before= the unit).
 */
int unit_add_default_target_dependency(Unit *u, Unit *target) {
assert(u);
assert(target);
if (target->type != UNIT_TARGET)
return 0;
/* Only add the dependency if both units are loaded, so that
 * that loop check below is reliable */
if (u->load_state != UNIT_LOADED ||
target->load_state != UNIT_LOADED)
return 0;
/* If either side wants no automatic dependencies, then let's
 * skip this */
if (!u->default_dependencies ||
!target->default_dependencies)
return 0;
/* Don't create loops */
if (set_get(target->dependencies[UNIT_BEFORE], u))
return 0;
return unit_add_dependency(target, UNIT_AFTER, u, true);
}
/*
 * For every target that pulls this unit in (RequiredBy=, RequisiteOf=,
 * WantedBy=, BoundBy=), add the default target-After-unit ordering via
 * unit_add_default_target_dependency().
 */
static int unit_add_target_dependencies(Unit *u) {
static const UnitDependency deps[] = {
UNIT_REQUIRED_BY,
UNIT_REQUISITE_OF,
UNIT_WANTED_BY,
UNIT_BOUND_BY
};
Unit *target;
Iterator i;
unsigned k;
int r = 0;
assert(u);
for (k = 0; k < ELEMENTSOF(deps); k++)
SET_FOREACH(target, u->dependencies[deps[k]], i) {
r = unit_add_default_target_dependency(u, target);
if (r < 0)
return r;
}
return r;
}
/*
 * Give every cgroup-capable unit an After=/Requires= on its slice —
 * the configured one if set, otherwise the root slice (except for the
 * root slice itself).
 */
static int unit_add_slice_dependencies(Unit *u) {
assert(u);
if (!UNIT_HAS_CGROUP_CONTEXT(u))
return 0;
if (UNIT_ISSET(u->slice))
return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
if (unit_has_name(u, SPECIAL_ROOT_SLICE))
return 0;
return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
}
/*
 * For every RequiresMountsFor= path, add After= (and, when the mount has
 * a fragment file, Requires=) dependencies on the mount units covering
 * the path and each of its parent prefixes.
 */
static int unit_add_mount_dependencies(Unit *u) {
char **i;
int r;
assert(u);
STRV_FOREACH(i, u->requires_mounts_for) {
/* VLA large enough for the longest prefix (the path itself). */
char prefix[strlen(*i) + 1];
PATH_FOREACH_PREFIX_MORE(prefix, *i) {
_cleanup_free_ char *p = NULL;
Unit *m;
r = unit_name_from_path(prefix, ".mount", &p);
if (r < 0)
return r;
m = manager_get_unit(u->manager, p);
if (!m) {
/* Make sure to load the mount unit if
 * it exists. If so the dependencies
 * on this unit will be added later
 * during the loading of the mount
 * unit. */
(void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
continue;
}
if (m == u)
continue;
if (m->load_state != UNIT_LOADED)
continue;
r = unit_add_dependency(u, UNIT_AFTER, m, true);
if (r < 0)
return r;
/* Only require mounts that are backed by a real unit file. */
if (m->fragment_path) {
r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
if (r < 0)
return r;
}
}
}
return 0;
}
/*
 * Track units with special startup resource settings
 * (StartupCPUShares=/StartupBlockIOWeight=) in the manager's
 * startup_units set, so their cgroups can be adjusted when leaving the
 * startup phase.
 */
static int unit_add_startup_units(Unit *u) {
CGroupContext *c;
int r;
c = unit_get_cgroup_context(u);
if (!c)
return 0;
if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
return 0;
r = set_ensure_allocated(&u->manager->startup_units, NULL);
if (r < 0)
return r;
return set_put(u->manager->startup_units, u);
}
/*
 * Load a stub unit: run the type's load hook, then derive all implicit
 * dependencies (targets, slice, mounts) and register startup/netclass
 * bookkeeping.  On failure the unit ends up in UNIT_NOT_FOUND or
 * UNIT_ERROR with load_error set, and the error is returned.
 */
int unit_load(Unit *u) {
int r;
assert(u);
/* We are being loaded right now, so drop ourselves from the queue. */
if (u->in_load_queue) {
LIST_REMOVE(load_queue, u->manager->load_queue, u);
u->in_load_queue = false;
}
if (u->type == _UNIT_TYPE_INVALID)
return -EINVAL;
if (u->load_state != UNIT_STUB)
return 0;
if (UNIT_VTABLE(u)->load) {
r = UNIT_VTABLE(u)->load(u);
if (r < 0)
goto fail;
}
if (u->load_state == UNIT_STUB) {
r = -ENOENT;
goto fail;
}
if (u->load_state == UNIT_LOADED) {
r = unit_add_target_dependencies(u);
if (r < 0)
goto fail;
r = unit_add_slice_dependencies(u);
if (r < 0)
goto fail;
r = unit_add_mount_dependencies(u);
if (r < 0)
goto fail;
r = unit_add_startup_units(u);
if (r < 0)
goto fail;
if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
r = -EINVAL;
goto fail;
}
unit_update_cgroup_members_masks(u);
/* If we are reloading, we need to wait for the deserializer
 * to restore the net_cls ids that have been set previously */
if (u->manager->n_reloading <= 0) {
r = unit_add_to_netclass_cgroup(u);
if (r < 0)
return r;
}
}
assert((u->load_state != UNIT_MERGED) == !u->merged_into);
unit_add_to_dbus_queue(unit_follow_merge(u));
unit_add_to_gc_queue(u);
return 0;
fail:
/* A stub that failed to load becomes NOT_FOUND; anything else ERROR. */
u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
u->load_error = r;
unit_add_to_dbus_queue(u);
unit_add_to_gc_queue(u);
log_unit_debug_errno(u, r, "Failed to load configuration: %m");
return r;
}
/*
 * Evaluate a Condition*=/Assert*= list: true iff all non-trigger
 * conditions hold AND at least one trigger ("|") condition holds (or no
 * trigger conditions exist).  An empty list is true.  Evaluation errors
 * are logged and treated as failed conditions.
 */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
Condition *c;
/* -1 = no trigger conditions seen yet, 0 = none matched, 1 = matched */
int triggered = -1;
assert(u);
assert(to_string);
/* If the condition list is empty, then it is true */
if (!first)
return true;
/* Otherwise, if all of the non-trigger conditions apply and
 * if any of the trigger conditions apply (unless there are
 * none) we return true */
LIST_FOREACH(conditions, c, first) {
int r;
r = condition_test(c);
if (r < 0)
log_unit_warning(u,
"Couldn't determine result for %s=%s%s%s, assuming failed: %m",
to_string(c->type),
c->trigger ? "|" : "",
c->negate ? "!" : "",
c->parameter);
else
log_unit_debug(u,
"%s=%s%s%s %s.",
to_string(c->type),
c->trigger ? "|" : "",
c->negate ? "!" : "",
c->parameter,
condition_result_to_string(c->result));
if (!c->trigger && r <= 0)
return false;
if (c->trigger && triggered <= 0)
triggered = r > 0;
}
return triggered != 0;
}
/* Evaluate all ConditionXyz= settings of the unit, record the time of
 * the check and cache the verdict in u->condition_result. */
static bool unit_condition_test(Unit *u) {
        bool verdict;

        assert(u);

        dual_timestamp_get(&u->condition_timestamp);
        verdict = unit_condition_test_list(u, u->conditions, condition_type_to_string);
        u->condition_result = verdict;

        return verdict;
}
/* Evaluate all AssertXyz= settings of the unit, record the time of
 * the check and cache the verdict in u->assert_result. */
static bool unit_assert_test(Unit *u) {
        bool verdict;

        assert(u);

        dual_timestamp_get(&u->assert_timestamp);
        verdict = unit_condition_test_list(u, u->asserts, assert_type_to_string);
        u->assert_result = verdict;

        return verdict;
}
/* Return the printf-style format string used for status/log messages
 * for the given job type on this unit (containing one %s for the unit
 * description). Falls back to generic strings when the unit type does
 * not provide its own table entry; reload always uses the generic
 * string. */
_pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
        const char *format;

        assert(u);
        assert(t == JOB_START || t == JOB_STOP || t == JOB_RELOAD);

        if (t != JOB_RELOAD) {
                /* The previous "if (format_table)" check here was dead:
                 * taking the address of a vtable member can never yield
                 * NULL, hence look up the entry directly. */
                format = UNIT_VTABLE(u)->status_message_formats.starting_stopping[t == JOB_STOP];
                if (format)
                        return format;
        }

        /* Return generic strings */
        if (t == JOB_START)
                return "Starting %s.";
        else if (t == JOB_STOP)
                return "Stopping %s.";
        else
                return "Reloading %s.";
}
/* Print a "Starting foo." / "Stopping foo." line to the console status
 * area for this unit and job type. */
static void unit_status_print_starting_stopping(Unit *u, JobType t) {
        assert(u);

        /* The format string is selected at runtime, hence silence the
         * non-literal-format warning around the printf call. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        unit_status_printf(u, "", unit_get_status_message_format(u, t));
        REENABLE_WARNING;
}
/* Emit a structured journal record for a starting/stopping/reloading
 * transition of the unit. */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format;
        char buf[LINE_MAX];
        sd_id128_t mid;

        assert(u);

        if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD)
                return;

        /* Skip when logging goes to the console — presumably because
         * the console status line already shows the same message. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* The format string is picked at runtime; silence the
         * non-literal-format warning for the snprintf call. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        snprintf(buf, sizeof(buf), format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the catalog message ID matching the job type. */
        mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
              t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
              SD_MESSAGE_UNIT_RELOADING;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE_ID(mid),
                   LOG_UNIT_ID(u),
                   LOG_MESSAGE("%s", buf),
                   NULL);
}
/* Emit both the journal record and (except for reloads) the console
 * status line for a start/stop/reload transition. */
void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
        unit_status_log_starting_stopping_reloading(u, t);

        /* Reload status messages have traditionally not been printed to console. */
        if (t == JOB_RELOAD)
                return;

        unit_status_print_starting_stopping(u, t);
}
/* Errors:
* -EBADR: This unit type does not support starting.
* -EALREADY: Unit is already started.
* -EAGAIN: An operation is already in progress. Retry later.
* -ECANCELED: Too many requests for now.
* -EPROTO: Assert failed
*/
/* Start the unit; see the error list in the comment directly above
 * for the codes callers should expect. */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */
        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
/* Returns true if the unit is loaded, its type is supported on this
 * system, and the type implements a start method. */
bool unit_can_start(Unit *u) {
        assert(u);

        return u->load_state == UNIT_LOADED &&
                unit_supported(u) &&
                UNIT_VTABLE(u)->start;
}
/* A unit may be isolated if it is startable and explicitly allows
 * isolation (AllowIsolate=). */
bool unit_can_isolate(Unit *u) {
        assert(u);

        if (!unit_can_start(u))
                return false;

        return u->allow_isolate;
}
/* Errors:
* -EBADR: This unit type does not support stopping.
* -EALREADY: Unit is already stopped.
* -EAGAIN: An operation is already in progress. Retry later.
*/
/* Stop the unit; see the error list in the comment directly above for
 * the codes callers should expect. */
int unit_stop(Unit *u) {
        Unit *target;

        assert(u);

        /* Already down? Then report that, nothing to do. */
        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return -EALREADY;

        /* Forward the request to the main object, if we aren't it. */
        target = unit_following(u);
        if (target) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, target->id);
                return unit_stop(target);
        }

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}
/* Errors:
* -EBADR: This unit type does not support reloading.
* -ENOEXEC: Unit is not started.
* -EAGAIN: An operation is already in progress. Retry later.
*/
/* Reload the unit's configuration at runtime; see the error list in
 * the comment directly above for the codes callers should expect. */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only an active unit can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);
        return UNIT_VTABLE(u)->reload(u);
}
/* A unit can be reloaded if its type implements reload() and either
 * provides no can_reload() hook or that hook agrees. */
bool unit_can_reload(Unit *u) {
        assert(u);

        if (!UNIT_VTABLE(u)->reload)
                return false;

        return !UNIT_VTABLE(u)->can_reload || UNIT_VTABLE(u)->can_reload(u);
}
/* If the unit has StopWhenUnneeded= set and nothing active or pending
 * depends on it anymore, enqueue a stop job for it (rate-limited). */
static void unit_check_unneeded(Unit *u) {
        _cleanup_bus_error_free_ sd_bus_error error = SD_BUS_ERROR_NULL;

        /* Dependency types that count as "somebody needs us". */
        static const UnitDependency needed_dependencies[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        Unit *other;
        Iterator i;
        unsigned j;
        int r;

        assert(u);

        /* If this service shall be shut down when unneeded then do
         * so. */

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return;

        /* Is anyone that needs us still active or queued? Then bail out. */
        for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
                SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
                        if (unit_active_or_pending(other))
                                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
                return;
        }

        log_unit_info(u, "Unit not needed anymore. Stopping.");

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
/* If the unit is active but one of the units it has BindsTo= on has
 * gone inactive/failed, enqueue a stop job for the unit itself
 * (rate-limited). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_bus_error_free_ sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        int r;

        assert(u);

        /* Don't interfere with a job that is already scheduled. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

        /* "stop" is only set after the loop assigned "other", so it
         * points at the inactive unit that triggered us. Assert this
         * BEFORE dereferencing it below (the assert used to sit after
         * the first other->id use). */
        assert(other);

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
/* The unit became active without a job requesting it: queue start jobs
 * for its pulled-in dependencies and stop jobs for its conflicts.
 * Errors from manager_add_job() are deliberately ignored (best effort). */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= and BindsTo= are started in JOB_REPLACE mode;
         * units also listed in our After= set are skipped. */
        SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
                if (!set_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
                if (!set_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= dependencies are queued in the weaker JOB_FAIL mode. */
        SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
                if (!set_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units (in either direction) are stopped. */
        SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
/* The unit went down without a job requesting it: pull down every unit
 * that declared BindsTo= on us. */
static void retroactively_stop_dependencies(Unit *u) {
        Unit *dep;
        Iterator it;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Pull down units which are bound to us recursively if enabled */
        SET_FOREACH(dep, u->dependencies[UNIT_BOUND_BY], it) {
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(dep)))
                        continue;

                manager_add_job(u->manager, JOB_STOP, dep, JOB_REPLACE, NULL, NULL);
        }
}
/* After the unit went down, check every unit it pulled in for
 * StopWhenUnneeded= garbage collection. */
static void check_unneeded_dependencies(Unit *u) {

        /* Dependency types whose targets we pulled in and may now be
         * unneeded; deduplicates the four previously copy-pasted
         * SET_FOREACH stanzas (same idiom as needed_dependencies[] in
         * unit_check_unneeded()). */
        static const UnitDependency pulled_in_deps[] = {
                UNIT_REQUIRES,
                UNIT_WANTS,
                UNIT_REQUISITE,
                UNIT_BINDS_TO,
        };

        Iterator i;
        Unit *other;
        unsigned j;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Garbage collect services that might not be needed anymore, if enabled */
        for (j = 0; j < ELEMENTSOF(pulled_in_deps); j++)
                SET_FOREACH(other, u->dependencies[pulled_in_deps[j]], i)
                        if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                                unit_check_unneeded(other);
}
/* Enqueue start jobs for every OnFailure= unit of u, using the
 * configured OnFailureJobMode=. Individual enqueue errors are logged
 * but do not stop the remaining jobs. */
void unit_start_on_failure(Unit *u) {
        Unit *other;
        Iterator i;

        assert(u);

        if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
                return;

        log_unit_info(u, "Triggering OnFailure= dependencies.");

        SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
                int q;

                q = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
                if (q < 0)
                        log_unit_error_errno(u, q, "Failed to enqueue OnFailure= job: %m");
        }
}
/* Tell every unit with a Triggers= relationship on u (i.e. in our
 * TriggeredBy= set) about a state change of u. */
void unit_trigger_notify(Unit *u) {
        Unit *t;
        Iterator i;

        assert(u);

        SET_FOREACH(t, u->dependencies[UNIT_TRIGGERED_BY], i) {
                if (!UNIT_VTABLE(t)->trigger_notify)
                        continue;

                UNIT_VTABLE(t)->trigger_notify(t, u);
        }
}
/* Called by the per-type implementations whenever the unit's state
 * changes from "os" to "ns". Updates timestamps, finishes or
 * invalidates jobs, retroactively queues dependency jobs, sends audit
 * and plymouth notifications, and schedules GC/D-Bus updates. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        Manager *m;
        bool unexpected;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes,
         * even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected
         * behavior here. For example: if a mount point is remounted
         * this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (m->n_reloading <= 0) {
                dual_timestamp ts;

                dual_timestamp_get(&ts);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = ts;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = ts;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = ts;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = ts;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup is always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                unit_prune_cgroup(u);

        /* Note that this doesn't apply to RemainAfterExit services exiting
         * successfully, since there's no change of state in that case. Which is
         * why it is handled in service_set_state() */
        if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                ExecContext *ec;

                /* Maintain the manager's count of units that may write
                 * to the console. */
                ec = unit_get_exec_context(u);
                if (ec && exec_context_may_touch_console(ec)) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                m->n_on_console --;

                                if (m->n_on_console == 0)
                                        /* unset no_console_output flag, since the console is free */
                                        m->no_console_output = false;
                        } else
                                m->n_on_console ++;
                }
        }

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
                                else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                /* No job at all — every state change counts as unexpected. */
                unexpected = true;

        if (m->n_reloading <= 0) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_notice(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        /* Some names are special */
        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
                        /* The bus might have just become available,
                         * hence try to connect to it, if we aren't
                         * yet connected. */
                        bus_init(m, true);

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    m->n_reloading <= 0) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {

                /* We don't care about D-Bus here, since we'll get an
                 * asynchronous notification for it anyway. */

                if (u->type == UNIT_SERVICE &&
                    UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os) &&
                    m->n_reloading <= 0) {

                        /* Hmm, if there was no start record written
                         * write it now, so that we always have a nice
                         * pair */
                        if (!u->in_audit) {
                                manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                if (ns == UNIT_INACTIVE)
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                        } else
                                /* Write audit record if we have just finished shutting down */
                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                        u->in_audit = false;
                }
        }

        manager_recheck_journal(m);
        unit_trigger_notify(u);

        if (u->manager->n_reloading <= 0) {
                /* Maybe we finished startup and are now ready for
                 * being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed
                 * has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these
                 * units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
/* Watch a specific PID on behalf of this unit. At most two units may
 * watch the same PID (primary map watch_pids1, overflow watch_pids2).
 * Returns the final hashmap_put() result: >= 0 on success, -EEXIST if
 * both slots are already taken, other negative errno on OOM etc. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int q, r;

        assert(u);
        assert(pid >= 1);

        /* Watch a specific PID. We only support one or two units
         * watching each PID for now, not more. */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                /* Primary slot is taken by another unit — fall back to
                 * the secondary map. */
                r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
                if (r < 0)
                        return r;

                r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
        }

        /* Also track the PID in our own set, regardless of the map result. */
        q = set_put(u->pids, PID_TO_PTR(pid));
        if (q < 0)
                return q;

        return r;
}
/* Stop watching the given PID for this unit: drop it from our own PID
 * set and from whichever manager watch map it was registered in. */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        assert(u);
        assert(pid >= 1);

        (void) set_remove(u->pids, PID_TO_PTR(pid));

        /* The PID may live in either of the two watch maps; try both. */
        (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
        (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
}
/* Drop every PID watch of this unit and release the PID set itself. */
void unit_unwatch_all_pids(Unit *u) {
        void *e;

        assert(u);

        /* PIDs are stored as non-NULL pointers (pid >= 1 is asserted
         * on insertion), hence set_first() returning NULL means empty. */
        while ((e = set_first(u->pids)))
                unit_unwatch_pid(u, PTR_TO_PID(e));

        u->pids = set_free(u->pids);
}
/* Forget watched PIDs that have already been waited for; "except1"
 * and "except2" are spared even if dead. */
void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the current
                 * entry from u->pids while SET_FOREACH is iterating it —
                 * presumably the set iterator tolerates removal of the
                 * current element; verify against the set implementation. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
/* Check whether a job of the given type makes sense for this unit. */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        /* These job types are meaningful for every unit. */
        if (IN_SET(j, JOB_VERIFY_ACTIVE, JOB_START, JOB_STOP, JOB_NOP))
                return true;

        /* Restarting implies being able to start. */
        if (IN_SET(j, JOB_RESTART, JOB_TRY_RESTART))
                return unit_can_start(u);

        if (j == JOB_RELOAD)
                return unit_can_reload(u);

        if (j == JOB_RELOAD_OR_START)
                return unit_can_reload(u) && unit_can_start(u);

        assert_not_reached("Invalid job type");
}
/* Warn when a dependency gets dropped (e.g. because both sides merged
 * into the same unit), but only for the dependency types where that
 * is surprising enough to be worth a warning. */
static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
        assert(u);

        /* Only warn about some unit types */
        if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
                return;

        if (streq_ptr(u->id, other)) {
                log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
                return;
        }

        log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
}
/* Register a dependency of type "d" from "u" on "other" and, per the
 * table below, the inverse dependency on "other". With add_reference,
 * additionally record a References=/ReferencedBy= pair so the target
 * is pinned against GC. Self-dependencies (after merge resolution) are
 * silently dropped with return 0. On error, partially performed set
 * insertions are rolled back. */
int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {

        /* Maps each dependency type to the inverse type to record on
         * the other unit (or _UNIT_DEPENDENCY_INVALID for none). */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
        };
        int r, q = 0, v = 0, w = 0;
        Unit *orig_u = u, *orig_other = other;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        /* Resolve merged units to their surviving counterparts. */
        u = unit_follow_merge(u);
        other = unit_follow_merge(other);

        /* We won't allow dependencies on ourselves. We will not
         * consider them an error however. */
        if (u == other) {
                maybe_warn_about_dependency(orig_u, orig_other->id, d);
                return 0;
        }

        /* Allocate all target sets up front so the insertions below
         * can only fail with -ENOMEM that we can roll back cleanly. */
        r = set_ensure_allocated(&u->dependencies[d], NULL);
        if (r < 0)
                return r;

        if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
                r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
                if (r < 0)
                        return r;
        }

        if (add_reference) {
                r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
                if (r < 0)
                        return r;

                r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
                if (r < 0)
                        return r;
        }

        /* q/v/w remember which of the insertions actually added a new
         * entry (> 0), so the fail path undoes exactly those. */
        q = set_put(u->dependencies[d], other);
        if (q < 0)
                return q;

        if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
                v = set_put(other->dependencies[inverse_table[d]], u);
                if (v < 0) {
                        r = v;
                        goto fail;
                }
        }

        if (add_reference) {
                w = set_put(u->dependencies[UNIT_REFERENCES], other);
                if (w < 0) {
                        r = w;
                        goto fail;
                }

                r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
                if (r < 0)
                        goto fail;
        }

        unit_add_to_dbus_queue(u);
        return 0;

fail:
        if (q > 0)
                set_remove(u->dependencies[d], other);

        if (v > 0)
                set_remove(other->dependencies[inverse_table[d]], u);

        if (w > 0)
                set_remove(u->dependencies[UNIT_REFERENCES], other);

        return r;
}
/* Convenience helper: register two dependency types between the same
 * pair of units in one call. */
int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
        int r;

        assert(u);

        r = unit_add_dependency(u, d, other, add_reference);

        return r < 0 ? r : unit_add_dependency(u, e, other, add_reference);
}
/* Resolve "name" (or, when NULL, the basename of "path") against this
 * unit's instance: template names get instantiated. On success *ret
 * points to the name to use; *buf receives newly allocated backing
 * memory (NULL when the input was usable as-is) owned by the caller. */
static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
        int r;

        assert(u);
        assert(name || path);
        assert(buf);
        assert(ret);

        if (!name)
                name = basename(path);

        if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                /* Not a template — hand back the input unchanged. */
                *buf = NULL;
                *ret = name;
                return 0;
        }

        if (u->instance)
                r = unit_name_replace_instance(name, u->instance, buf);
        else {
                _cleanup_free_ char *i = NULL;

                /* We have no instance of our own; use our unit name
                 * prefix as the instance string instead. */
                r = unit_name_to_prefix(u->id, &i);
                if (r < 0)
                        return r;

                r = unit_name_replace_instance(name, i, buf);
        }
        if (r < 0)
                return r;

        *ret = *buf;
        return 0;
}
/* Add a dependency on a unit identified by name (or path), resolving
 * template names against our instance and loading the target first. */
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
        _cleanup_free_ char *buf = NULL;
        Unit *target;
        int r;

        assert(u);
        assert(name || path);

        /* Instantiate template names against our own instance. */
        r = resolve_template(u, name, path, &buf, &name);
        if (r < 0)
                return r;

        /* Make sure the target unit object exists, loading it if needed. */
        r = manager_load_unit(u->manager, name, path, NULL, &target);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, target, add_reference);
}
/* Like unit_add_dependency_by_name(), but registers two dependency
 * types against the same target unit. */
int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
        _cleanup_free_ char *buf = NULL;
        Unit *target;
        int r;

        assert(u);
        assert(name || path);

        /* Instantiate template names against our own instance. */
        r = resolve_template(u, name, path, &buf, &name);
        if (r < 0)
                return r;

        /* Make sure the target unit object exists, loading it if needed. */
        r = manager_load_unit(u->manager, name, path, NULL, &target);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, target, add_reference);
}
/* Override the unit search path via the environment. Returns 0 on
 * success, -errno if setenv() fails. */
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes */
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
/* Return the D-Bus object path for this unit (caller frees), or NULL
 * when the unit has no id yet. */
char *unit_dbus_path(Unit *u) {
        assert(u);

        return u->id ? unit_dbus_path_from_name(u->id) : NULL;
}
/* Assign the unit to a slice. Returns 1 when the slice was set, 0 when
 * the same slice was already set, negative errno on refusal. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Can't re-slice a unit that is already running. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special: it must stay in the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        /* Re-setting the same slice is a no-op; switching to a
         * different one after the fact is refused. */
        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        if (UNIT_ISSET(u->slice))
                return -EBUSY;

        unit_ref_set(&u->slice, slice);
        return 1;
}
/* Pick and set a default slice for the unit if none is configured yet:
 * instantiated units get a per-template slice, everything else goes to
 * system.slice (system manager) resp. the root slice. */
int unit_set_default_slice(Unit *u) {
        _cleanup_free_ char *b = NULL;
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (u->manager->running_as == MANAGER_SYSTEM)
                        b = strjoin("system-", escaped, ".slice", NULL);
                else
                        b = strappend(escaped, ".slice");
                if (!b)
                        return -ENOMEM;

                slice_name = b;
        } else
                /* init.scope stays in the root slice, everything else
                 * in the system manager defaults to system.slice. */
                slice_name =
                        u->manager->running_as == MANAGER_SYSTEM && !unit_has_name(u, SPECIAL_INIT_SCOPE)
                        ? SPECIAL_SYSTEM_SLICE
                        : SPECIAL_ROOT_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
/* Return the id of the slice the unit is assigned to, or NULL when no
 * slice has been set. */
const char *unit_slice_name(Unit *u) {
        assert(u);

        if (UNIT_ISSET(u->slice))
                return UNIT_DEREF(u->slice)->id;

        return NULL;
}
/* Load the unit that shares our name but carries the given type suffix
 * (e.g. the matching ".socket" for a ".service"). Refuses with -EINVAL
 * if that unit would be ourselves. */
int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *related = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &related);
        if (r < 0)
                return r;

        if (unit_has_name(u, related))
                return -EINVAL;

        r = manager_load_unit(u->manager, related, NULL, NULL, _found);
        assert(r < 0 || *_found != u);

        return r;
}
/* D-Bus match callback for the NameOwnerChanged signal on a name this
 * unit watches; forwards the change to the unit type implementation.
 * Always returns 0 (parse failures are logged, not propagated). */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *name, *old_owner, *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        /* Signal signature is "sss": name, old owner, new owner. */
        r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);

        return 0;
}
/* Install a D-Bus signal match so we are notified when ownership of
 * "name" changes. Only one match slot per unit is supported; returns
 * -EBUSY if one is already installed. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot)
                return -EBUSY;

        /* NOTE(review): the trailing NULL appendee looks redundant —
         * strjoina() presumably stops at the first NULL argument
         * anyway; verify against the macro definition before removing. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'",
                         NULL);

        return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
}
/* Watch a bus name on behalf of this unit: install the signal match
 * right away if the API bus is up, and record the name in the
 * manager's watch_bus map either way. */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the match slot we may have just installed. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
/* Undo unit_watch_bus_name(): drop the name from the manager's
 * watch_bus map and release the match slot, if any. */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
}
/* A unit type supports state serialization only if it implements both
 * the serialize() and the deserialize_item() hooks. */
bool unit_can_serialize(Unit *u) {
        assert(u);

        if (!UNIT_VTABLE(u)->serialize)
                return false;

        return !!UNIT_VTABLE(u)->deserialize_item;
}
/* Serialize the unit's runtime state to f (used to survive daemon
 * re-execution); fds collects file descriptors to pass along. The
 * record is terminated by an empty line. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        if (unit_can_serialize(u)) {
                ExecRuntime *rt;

                /* Per-type state first... */
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;

                /* ...then the shared execution runtime, if any. */
                rt = unit_get_exec_runtime(u);
                if (rt) {
                        r = exec_runtime_serialize(u, rt, f, fds);
                        if (r < 0)
                                return r;
                }
        }

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful once tested. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));
        unit_serialize_item_format(u, f, "cpuacct-usage-base", "%" PRIu64, u->cpuacct_usage_base);

        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));

        if (u->cgroup_netclass_id)
                unit_serialize_item_format(u, f, "netclass-id", "%" PRIu32, u->cgroup_netclass_id);

        if (serialize_jobs) {
                /* Both the real and the nop job are written with the
                 * same "job" introducer line. */
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f, fds);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f, fds);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
/* Write one "key=value" serialization line. Returns 1 when written,
 * 0 when value was NULL (nothing to serialize). */
int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
        assert(u);
        assert(f);
        assert(key);

        if (!value)
                return 0;

        fprintf(f, "%s=%s\n", key, value);

        return 1;
}
/* Like unit_serialize_item(), but C-escapes the value first so that
 * newlines and other special characters survive the round trip. */
int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
        _cleanup_free_ char *escaped = NULL;

        assert(u);
        assert(f);
        assert(key);

        if (!value)
                return 0;

        escaped = cescape(value);
        if (!escaped)
                return -ENOMEM;

        fprintf(f, "%s=%s\n", key, escaped);

        return 1;
}
/* Serialize a file descriptor: duplicate it into the FDSet (so it
 * survives re-exec) and write the index of the copy. Returns 1 when
 * written, 0 for negative fds, negative errno on duplication failure. */
int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
        int copy;

        assert(u);
        assert(f);
        assert(key);

        if (fd < 0)
                return 0;

        copy = fdset_put_dup(fds, fd);
        if (copy < 0)
                return copy;

        fprintf(f, "%s=%i\n", key, copy);
        return 1;
}
/* Serialize one "key=<formatted value>" line, with the value produced
 * by the given printf-style format and arguments. */
void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
        va_list ap;

        assert(u);
        assert(f);
        assert(key);
        assert(format);

        fprintf(f, "%s=", key);

        va_start(ap, format);
        vfprintf(f, format, ap);
        va_end(ap);

        fputc('\n', f);
}
/* Read back the state written by unit_serialize(): one "key=value" line
 * at a time until an empty line (the end marker) or EOF. Generic unit
 * keys are handled here; anything unrecognized is forwarded to the
 * per-type deserialize_item() vtable hook (and, before that, to the
 * ExecRuntime deserializer, if this unit type embeds one).
 * Returns 0 on success (including clean EOF), negative errno on error. */
int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
        ExecRuntime **rt = NULL;
        size_t offset;
        int r;
        assert(u);
        assert(f);
        assert(fds);
        /* Locate the embedded ExecRuntime* for this unit type, if any
         * (offset 0 means the type has none). */
        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        if (offset > 0)
                rt = (ExecRuntime**) ((uint8_t*) u + offset);
        for (;;) {
                char line[LINE_MAX], *l, *v;
                size_t k;
                if (!fgets(line, sizeof(line), f)) {
                        if (feof(f))
                                return 0;
                        return -errno;
                }
                char_array_0(line);
                l = strstrip(line);
                /* End marker */
                if (isempty(l))
                        return 0;
                /* Split "key=value"; keys without '=' get an empty value
                 * (v then points at the terminating NUL). */
                k = strcspn(l, "=");
                if (l[k] == '=') {
                        l[k] = 0;
                        v = l+k+1;
                } else
                        v = l+k;
                if (streq(l, "job")) {
                        if (v[0] == '\0') {
                                /* new-style serialized job */
                                Job *j;
                                j = job_new_raw(u);
                                if (!j)
                                        return log_oom();
                                r = job_deserialize(j, f, fds);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }
                                r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }
                                r = job_install_deserialized(j);
                                if (r < 0) {
                                        /* Undo the hashmap_put above before freeing */
                                        hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
                                        job_free(j);
                                        return r;
                                }
                        } else /* legacy for pre-44 */
                                log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
                        continue;
                } else if (streq(l, "inactive-exit-timestamp")) {
                        dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
                        continue;
                } else if (streq(l, "active-enter-timestamp")) {
                        dual_timestamp_deserialize(v, &u->active_enter_timestamp);
                        continue;
                } else if (streq(l, "active-exit-timestamp")) {
                        dual_timestamp_deserialize(v, &u->active_exit_timestamp);
                        continue;
                } else if (streq(l, "inactive-enter-timestamp")) {
                        dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
                        continue;
                } else if (streq(l, "condition-timestamp")) {
                        dual_timestamp_deserialize(v, &u->condition_timestamp);
                        continue;
                } else if (streq(l, "assert-timestamp")) {
                        dual_timestamp_deserialize(v, &u->assert_timestamp);
                        continue;
                } else if (streq(l, "condition-result")) {
                        /* Malformed booleans are logged and ignored, keeping the
                         * previous value, rather than aborting deserialization. */
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
                        else
                                u->condition_result = r;
                        continue;
                } else if (streq(l, "assert-result")) {
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
                        else
                                u->assert_result = r;
                        continue;
                } else if (streq(l, "transient")) {
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
                        else
                                u->transient = r;
                        continue;
                } else if (streq(l, "cpuacct-usage-base")) {
                        r = safe_atou64(v, &u->cpuacct_usage_base);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse CPU usage %s, ignoring.", v);
                        continue;
                } else if (streq(l, "cgroup")) {
                        r = unit_set_cgroup_path(u, v);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
                        (void) unit_watch_cgroup(u);
                        continue;
                } else if (streq(l, "cgroup-realized")) {
                        int b;
                        b = parse_boolean(v);
                        if (b < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
                        else
                                u->cgroup_realized = b;
                        continue;
                } else if (streq(l, "netclass-id")) {
                        r = safe_atou32(v, &u->cgroup_netclass_id);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse netclass ID %s, ignoring.", v);
                        else {
                                r = unit_add_to_netclass_cgroup(u);
                                if (r < 0)
                                        log_unit_debug_errno(u, r, "Failed to add unit to netclass cgroup, ignoring: %m");
                        }
                        continue;
                }
                /* Not a generic key: hand it to the type-specific code. */
                if (unit_can_serialize(u)) {
                        if (rt) {
                                r = exec_runtime_deserialize_item(u, rt, l, v, fds);
                                if (r < 0) {
                                        log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
                                        continue;
                                }
                                /* Returns positive if key was handled by the call */
                                if (r > 0)
                                        continue;
                        }
                        r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
                        if (r < 0)
                                log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
                }
        }
}
/* Add dependency links from this unit to the .device unit backing the
 * device node 'what'. If 'wants' is set, the device unit additionally
 * gets a Wants= on us. No-op for empty/non-device paths or when device
 * units are unsupported (e.g. in containers).
 * Returns 0 on success (or no-op), negative errno on failure. */
int unit_add_node_link(Unit *u, const char *what, bool wants) {
        _cleanup_free_ char *name = NULL;
        Unit *device;
        int r;

        assert(u);

        if (isempty(what) || !is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a
         * container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, NULL, NULL, &device);
        if (r < 0)
                return r;

        r = unit_add_two_dependencies(u, UNIT_AFTER,
                                      u->manager->running_as == MANAGER_SYSTEM ? UNIT_BINDS_TO : UNIT_WANTS,
                                      device, true);
        if (r < 0)
                return r;

        if (!wants)
                return 0;

        r = unit_add_dependency(device, UNIT_WANTS, u, false);
        return r < 0 ? r : 0;
}
/* Re-establish runtime state after deserialization: call the type's
 * coldplug() hook and coldplug any pending job. The coldplugged flag
 * prevents infinite recursion when units coldplug each other.
 * Returns the first error of the two operations, else 0. */
int unit_coldplug(Unit *u) {
        int vtable_r = 0, job_r = 0;

        assert(u);

        if (u->coldplugged)
                return 0;
        u->coldplugged = true;

        if (UNIT_VTABLE(u)->coldplug)
                vtable_r = UNIT_VTABLE(u)->coldplug(u);
        if (u->job)
                job_r = job_coldplug(u->job);

        if (vtable_r < 0)
                return vtable_r;
        return job_r < 0 ? job_r : 0;
}
/* Print a console status line for this unit, substituting the unit's
 * description into the caller-supplied format string. The format string
 * is not a literal here, hence the temporary suppression of the
 * -Wformat-nonliteral warning around the call. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL,
                              status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
/* Decide whether the on-disk configuration of this unit changed since it
 * was loaded, i.e. whether a daemon-reload is needed. Checks the fragment
 * file, the source file and the set of drop-in files (both membership and
 * mtimes). Any stat() failure is treated as "changed". */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **dropins = NULL;
        char **path;
        struct stat st;
        unsigned loaded_cnt, current_cnt;

        assert(u);

        if (u->fragment_path) {
                zero(st);
                if (stat(u->fragment_path, &st) < 0)
                        /* What, cannot access this anymore? */
                        return true;
                if (u->fragment_mtime > 0 &&
                    timespec_load(&st.st_mtim) != u->fragment_mtime)
                        return true;
        }

        if (u->source_path) {
                zero(st);
                if (stat(u->source_path, &st) < 0)
                        return true;
                if (u->source_mtime > 0 &&
                    timespec_load(&st.st_mtim) != u->source_mtime)
                        return true;
        }

        /* Compare the drop-in set currently on disk against what we loaded. */
        (void) unit_find_dropin_paths(u, &dropins);
        loaded_cnt = strv_length(dropins);
        current_cnt = strv_length(u->dropin_paths);

        if (loaded_cnt != current_cnt)
                return true;
        if (loaded_cnt == 0)
                return false;
        if (!strv_overlap(u->dropin_paths, dropins))
                return true;

        STRV_FOREACH(path, u->dropin_paths) {
                zero(st);
                if (stat(*path, &st) < 0)
                        return true;
                if (u->dropin_mtime > 0 &&
                    timespec_load(&st.st_mtim) > u->dropin_mtime)
                        return true;
        }

        return false;
}
/* Clear the unit's "failed" state via the type-specific hook, if the
 * unit type implements one. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);
}
/* Return the unit whose state this unit follows (per the type's
 * following() hook), or NULL if it tracks its own state. */
Unit *unit_following(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->following ? UNIT_VTABLE(u)->following(u) : NULL;
}
/* Whether a stop job is queued for this unit. This only looks at the
 * queued job, not the current state, so it is safe to call from state
 * change callbacks of the unit itself (where the state isn't updated
 * yet) — unlike unit_inactive_or_pending(), which checks both. */
bool unit_stop_pending(Unit *u) {
        assert(u);

        if (!u->job)
                return false;

        return u->job->type == JOB_STOP;
}
/* Returns true if the unit is inactive/deactivating or has a stop job
 * queued, i.e. it is going down one way or another. */
bool unit_inactive_or_pending(Unit *u) {
        assert(u);

        return UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)) ||
                unit_stop_pending(u);
}
/* Returns true if the unit is active/activating or has a job queued
 * that will bring it up. */
bool unit_active_or_pending(Unit *u) {
        assert(u);

        if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return true;

        return u->job &&
                IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART);
}
/* Send signal signo to the processes of this unit selected by w,
 * via the type-specific kill() hook. Returns -EOPNOTSUPP if the unit
 * type does not support killing. */
int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
        assert(u);
        assert(w >= 0 && w < _KILL_WHO_MAX);
        assert(signo > 0 && signo < _NSIG);

        if (!UNIT_VTABLE(u)->kill)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->kill(u, w, signo, error);
}
/* Build a Set containing the main and control PIDs (those > 0), used to
 * exclude them from cgroup-wide kills. Returns NULL on OOM. */
static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
        Set *s;

        s = set_new(NULL);
        if (!s)
                return NULL;

        /* Exclude the main/control pids from being killed via the cgroup */
        if (main_pid > 0 && set_put(s, PID_TO_PTR(main_pid)) < 0)
                goto fail;
        if (control_pid > 0 && set_put(s, PID_TO_PTR(control_pid)) < 0)
                goto fail;

        return s;

fail:
        set_free(s);
        return NULL;
}
/* Shared implementation for the per-type kill() hooks: signal the main
 * process, the control process and/or the whole cgroup depending on
 * 'who'. The *_FAIL variants additionally return -ESRCH when nothing
 * was actually killed. Returns 0 on success, negative errno on failure,
 * or a bus error for invalid pid selections. */
int unit_kill_common(
                Unit *u,
                KillWho who,
                int signo,
                pid_t main_pid,
                pid_t control_pid,
                sd_bus_error *error) {
        int r = 0;
        bool killed = false;

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (main_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                else if (main_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }
        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (control_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                else if (control_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }
        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (control_pid > 0) {
                        if (kill(control_pid, signo) < 0)
                                r = -errno;
                        else
                                killed = true;
                }
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (main_pid > 0) {
                        if (kill(main_pid, signo) < 0)
                                r = -errno;
                        else
                                killed = true;
                }
        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
                _cleanup_set_free_ Set *pid_set = NULL;
                int q;
                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;
                q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, false, false, pid_set);
                if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
                        r = q;
                else
                        killed = true;
        }
        /* BUGFIX: the list previously read (KILL_ALL_FAIL, KILL_CONTROL_FAIL,
         * KILL_ALL_FAIL), repeating KILL_ALL_FAIL and omitting KILL_MAIN_FAIL,
         * so "kill main, fail if nothing killed" never reported -ESRCH. */
        if (r == 0 && !killed && IN_SET(who, KILL_MAIN_FAIL, KILL_CONTROL_FAIL, KILL_ALL_FAIL))
                return -ESRCH;
        return r;
}
/* Collect the set of units following this one via the type's
 * following_set() hook; *s is set to NULL when the type has none.
 * Returns 0 or a negative errno from the hook. */
int unit_following_set(Unit *u, Set **s) {
        assert(u);
        assert(s);

        if (!UNIT_VTABLE(u)->following_set) {
                *s = NULL;
                return 0;
        }

        return UNIT_VTABLE(u)->following_set(u, s);
}
/* Lazily determine and cache the unit file state (enabled/disabled/...)
 * for this unit's fragment. Falls back to UNIT_FILE_BAD on lookup error. */
UnitFileState unit_get_unit_file_state(Unit *u) {
        assert(u);

        if (u->unit_file_state >= 0 || !u->fragment_path)
                return u->unit_file_state;

        if (unit_file_get_state(
                            u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
                            NULL,
                            basename(u->fragment_path),
                            &u->unit_file_state) < 0)
                u->unit_file_state = UNIT_FILE_BAD;

        return u->unit_file_state;
}
/* Lazily determine and cache whether the unit is enabled by preset
 * policy. Negative cached values trigger a (re)query. */
int unit_get_unit_file_preset(Unit *u) {
        assert(u);

        if (u->unit_file_preset >= 0 || !u->fragment_path)
                return u->unit_file_preset;

        u->unit_file_preset = unit_file_query_preset(
                        u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
                        NULL,
                        basename(u->fragment_path));

        return u->unit_file_preset;
}
/* Point ref at unit u, registering it on u's reference list. Any
 * previous target is unregistered first. Returns u for convenience. */
Unit* unit_ref_set(UnitRef *ref, Unit *u) {
        assert(ref);
        assert(u);

        if (ref->unit)
                unit_ref_unset(ref);

        ref->unit = u;
        LIST_PREPEND(refs, u->refs, ref);

        return u;
}
/* Drop the reference held by ref, if any, removing it from the target
 * unit's reference list. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->unit)
                return;

        LIST_REMOVE(refs, ref->unit->refs, ref);
        ref->unit = NULL;
}
/* Fold manager-level defaults into this unit's ExecContext and
 * CGroupContext, after all unit settings have been parsed. Only
 * settings the unit did not set itself are filled in.
 * Returns 0 on success, -ENOMEM on allocation failure, or an error
 * from get_home_dir(). */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;
        assert(u);
        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */
        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }
                /* User instances default WorkingDirectory= to $HOME */
                if (u->manager->running_as == MANAGER_USER &&
                    !ec->working_directory) {
                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;
                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }
                /* Seccomp filtering in user instances requires
                 * NoNewPrivileges=, since we lack CAP_SYS_ADMIN there. */
                if (u->manager->running_as == MANAGER_USER &&
                    (ec->syscall_whitelist ||
                     !set_isempty(ec->syscall_filter) ||
                     !set_isempty(ec->syscall_archs) ||
                     ec->address_families_whitelist ||
                     !set_isempty(ec->address_families)))
                        ec->no_new_privileges = true;
                /* PrivateDevices= implies dropping CAP_MKNOD */
                if (ec->private_devices)
                        ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
        }
        cc = unit_get_cgroup_context(u);
        if (cc) {
                /* PrivateDevices= tightens the default device policy */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }
        return 0;
}
/* Return a pointer to the ExecContext embedded in this unit's
 * type-specific struct, or NULL if the type has none. */
ExecContext *unit_get_exec_context(Unit *u) {
        size_t off;

        assert(u);

        if (u->type < 0)
                return NULL;

        off = UNIT_VTABLE(u)->exec_context_offset;
        return off > 0 ? (ExecContext*) ((uint8_t*) u + off) : NULL;
}
/* Return a pointer to the KillContext embedded in this unit's
 * type-specific struct, or NULL if the type has none. */
KillContext *unit_get_kill_context(Unit *u) {
        size_t off;

        assert(u);

        if (u->type < 0)
                return NULL;

        off = UNIT_VTABLE(u)->kill_context_offset;
        return off > 0 ? (KillContext*) ((uint8_t*) u + off) : NULL;
}
/* Return a pointer to the CGroupContext embedded in this unit's
 * type-specific struct, or NULL if the type has none. */
CGroupContext *unit_get_cgroup_context(Unit *u) {
        size_t offset;

        /* Consistency fix: the sibling accessors unit_get_exec_context()
         * and unit_get_kill_context() assert non-NULL; this one did not. */
        assert(u);

        if (u->type < 0)
                return NULL;
        offset = UNIT_VTABLE(u)->cgroup_context_offset;
        if (offset <= 0)
                return NULL;
        return (CGroupContext*) ((uint8_t*) u + offset);
}
/* Return the ExecRuntime pointer stored in this unit's type-specific
 * struct (may itself be NULL), or NULL if the type has none. */
ExecRuntime *unit_get_exec_runtime(Unit *u) {
        size_t offset;

        /* Consistency fix: sibling context accessors assert non-NULL u
         * before dereferencing; this one did not. */
        assert(u);

        if (u->type < 0)
                return NULL;
        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        if (offset <= 0)
                return NULL;
        return *(ExecRuntime**) ((uint8_t*) u + offset);
}
/* Determine the directory where drop-in snippets for this unit should
 * be written: persistent (/etc or user config home) for UNIT_PERSISTENT
 * on non-transient units, volatile (/run or user runtime dir) otherwise.
 * *dir receives a newly allocated path. */
static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
        bool persistent;

        assert(u);

        persistent = mode == UNIT_PERSISTENT && !transient;

        if (u->manager->running_as == MANAGER_USER) {
                int r;

                r = persistent ? user_config_home(dir) : user_runtime_dir(dir);
                if (r == 0)
                        return -ENOENT;
                return r;
        }

        *dir = strdup(persistent ? "/etc/systemd/system" : "/run/systemd/system");
        if (!*dir)
                return -ENOMEM;

        return 0;
}
/* Write a drop-in snippet named 'name' with contents 'data' for this
 * unit, record its path in u->dropin_paths, and bump dropin_mtime so
 * unit_need_daemon_reload() stays quiet. No-op for UNIT_CHECK mode. */
int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
        _cleanup_free_ char *dir = NULL, *unused = NULL, *dropin_path = NULL;
        int r;

        assert(u);

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        r = unit_drop_in_dir(u, mode, u->transient, &dir);
        if (r < 0)
                return r;

        r = write_drop_in(dir, u->id, 50, name, data);
        if (r < 0)
                return r;

        r = drop_in_file(dir, u->id, 50, name, &unused, &dropin_path);
        if (r < 0)
                return r;

        r = strv_extend(&u->dropin_paths, dropin_path);
        if (r < 0)
                return r;

        strv_sort(u->dropin_paths);
        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
/* printf-style convenience wrapper around unit_write_drop_in(). */
int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
        _cleanup_free_ char *formatted = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        va_start(ap, format);
        r = vasprintf(&formatted, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_drop_in(u, mode, name, formatted);
}
/* Like unit_write_drop_in(), but prefixes the data with the unit type's
 * private section header (e.g. "[Service]"). Fails with -EINVAL for
 * unit types without a private section. */
int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
        _cleanup_free_ char *prefixed = NULL;

        assert(u);
        assert(name);
        assert(data);

        if (!UNIT_VTABLE(u)->private_section)
                return -EINVAL;

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        prefixed = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
        if (!prefixed)
                return -ENOMEM;

        return unit_write_drop_in(u, mode, name, prefixed);
}
/* printf-style convenience wrapper around unit_write_drop_in_private(). */
int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
        _cleanup_free_ char *formatted = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        va_start(ap, format);
        r = vasprintf(&formatted, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_drop_in_private(u, mode, name, formatted);
}
/* Convert this unit into a transient (runtime-configured) unit: reset
 * load state and forget any fragment path. Fails with -EOPNOTSUPP for
 * unit types that cannot be transient. */
int unit_make_transient(Unit *u) {
        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;
        u->fragment_path = mfree(u->fragment_path);

        return 0;
}
/* Kill the unit's processes according to its KillContext: signal the
 * main and control processes, and depending on KillMode= also the whole
 * cgroup. 'k' selects the signal policy (configured kill signal,
 * SIGABRT or SIGKILL). Returns > 0 (true) if the caller should wait for
 * the processes to exit, 0 if not, negative errno on failure. */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {
        bool wait_for_exit = false;
        int sig, r;
        assert(u);
        assert(c);
        if (c->kill_mode == KILL_NONE)
                return 0;
        /* Map the kill operation to the signal to send */
        switch (k) {
        case KILL_KILL:
                sig = SIGKILL;
                break;
        case KILL_ABORT:
                sig = SIGABRT;
                break;
        case KILL_TERMINATE:
                sig = c->kill_signal;
                break;
        default:
                assert_not_reached("KillOperation unknown");
        }
        if (main_pid > 0) {
                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        get_process_comm(main_pid, &comm);
                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Don't wait for processes we didn't fork ourselves */
                        if (!main_pid_alien)
                                wait_for_exit = true;
                        if (c->send_sighup && k == KILL_TERMINATE)
                                (void) kill(main_pid, SIGHUP);
                }
        }
        if (control_pid > 0) {
                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        get_process_comm(control_pid, &comm);
                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;
                        if (c->send_sighup && k == KILL_TERMINATE)
                                (void) kill(control_pid, SIGHUP);
                }
        }
        /* Kill the remaining cgroup members for control-group mode, or for
         * mixed mode when escalating to SIGKILL. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;
                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;
                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, k != KILL_TERMINATE, false, pid_set);
                if (r < 0) {
                        if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
                } else if (r > 0) {
                        /* FIXME: For now, on the legacy hierarchy, we
                         * will not wait for the cgroup members to die
                         * if we are running in a container or if this
                         * is a delegation unit, simply because cgroup
                         * notification is unreliable in these
                         * cases. It doesn't work at all in
                         * containers, and outside of containers it
                         * can be confused easily by left-over
                         * directories in the cgroup -- which however
                         * should not exist in non-delegated units. On
                         * the unified hierarchy that's different,
                         * there we get proper events. Hence rely on
                         * them.*/
                        if (cg_unified() > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;
                        /* Rebuild the exclusion set and send SIGHUP separately,
                         * since the first recursive kill consumed pid_set. */
                        if (c->send_sighup && k != KILL_KILL) {
                                set_free(pid_set);
                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;
                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
                        }
                }
        }
        return wait_for_exit;
}
/* Register 'path' (and implicitly all its prefixes) as a mount
 * requirement of this unit, so that newly appearing mount units can
 * find the units that depend on them via the manager-wide prefix table.
 * Returns 0 on success (including when already registered), -EINVAL for
 * relative paths, -EPERM for unsafe paths, or -ENOMEM. */
int unit_require_mounts_for(Unit *u, const char *path) {
        char prefix[strlen(path) + 1], *p;
        int r;
        assert(u);
        assert(path);
        /* Registers a unit for requiring a certain path and all its
         * prefixes. We keep a simple array of these paths in the
         * unit, since its usually short. However, we build a prefix
         * table for all possible prefixes so that new appearing mount
         * units can easily determine which units to make themselves a
         * dependency of. */
        if (!path_is_absolute(path))
                return -EINVAL;
        p = strdup(path);
        if (!p)
                return -ENOMEM;
        path_kill_slashes(p);
        if (!path_is_safe(p)) {
                free(p);
                return -EPERM;
        }
        if (strv_contains(u->requires_mounts_for, p)) {
                free(p);
                return 0;
        }
        /* On success the strv takes ownership of p */
        r = strv_consume(&u->requires_mounts_for, p);
        if (r < 0)
                return r;
        /* Enter u into the per-prefix sets of the manager's lookup table,
         * creating sets (and the table itself) on demand. */
        PATH_FOREACH_PREFIX_MORE(prefix, p) {
                Set *x;
                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        char *q;
                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
                        if (r < 0)
                                return r;
                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;
                        x = set_new(NULL);
                        if (!x) {
                                free(q);
                                return -ENOMEM;
                        }
                        /* The hashmap takes ownership of q and x on success */
                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                free(q);
                                set_free(x);
                                return r;
                        }
                }
                r = set_put(x, u);
                if (r < 0)
                        return r;
        }
        return 0;
}
/* Ensure this unit has an ExecRuntime: reuse one from a unit we join
 * namespaces with if possible, otherwise create a fresh one.
 * Only valid for unit types that embed an ExecRuntime pointer. */
int unit_setup_exec_runtime(Unit *u) {
        ExecRuntime **rt;
        size_t offset;
        Iterator i;
        Unit *other;

        /* Consistency fix: u was dereferenced below without the non-NULL
         * assertion that every other accessor in this file performs. */
        assert(u);

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        /* Try to get it from somebody else */
        SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
                *rt = unit_get_exec_runtime(other);
                if (*rt) {
                        exec_runtime_ref(*rt);
                        return 0;
                }
        }

        return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
}
/* Whether unit type t is supported on this system. Types without a
 * supported() hook are unconditionally supported; out-of-range values
 * are not. */
bool unit_type_supported(UnitType t) {
        if (_unlikely_(t < 0 || t >= _UNIT_TYPE_MAX))
                return false;

        return unit_vtable[t]->supported ? unit_vtable[t]->supported() : true;
}
/* Log a structured notice if 'where' is a non-empty directory that is
 * about to be mounted over (hiding its contents). Check failures are
 * logged as warnings; nothing is logged for empty directories. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = dir_is_empty(where);
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }
        if (r > 0)
                return;

        log_struct(LOG_NOTICE,
                   LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where,
                   NULL);
}
/* Refuse to mount on top of a symlink: returns -ELOOP (after logging)
 * if 'where' is a symlink, 0 otherwise. Check failures are treated
 * leniently and logged at debug level. */
int unit_fail_if_symlink(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = is_symlink(where);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
                return 0;
        }
        if (r == 0)
                return 0;

        log_struct(LOG_ERR,
                   LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
                   "WHERE=%s", where,
                   NULL);

        return -ELOOP;
}
| lnykryn/systemd | src/core/unit.c | C | gpl-2.0 | 116,041 |
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <media/rc-core.h>
#include <media/gpio-ir-recv.h>
#define GPIO_IR_DRIVER_NAME "gpio-rc-recv"
#define GPIO_IR_DEVICE_NAME "gpio_ir_recv"
/* Per-device state for the GPIO IR receiver. */
struct gpio_rc_dev {
	struct rc_dev *rcdev;     /* rc-core device raw IR events are fed to */
	unsigned int gpio_nr;     /* GPIO line carrying the demodulated IR signal */
	bool active_low;          /* line is inverted (OF_GPIO_ACTIVE_LOW) */
	int can_sleep;            /* nonzero: must use gpio_get_value_cansleep() */
};
#ifdef CONFIG_OF
/*
* Translate OpenFirmware node properties into platform_data
*/
/* Fill pdata from the device-tree node: GPIO number/polarity from the
 * first gpios entry, optional keymap name from "linux,rc-map-name".
 * Returns 0 on success or the (negative) of_get_gpio_flags() error. */
static int gpio_ir_recv_get_devtree_pdata(struct device *dev,
				  struct gpio_ir_recv_platform_data *pdata)
{
	struct device_node *np = dev->of_node;
	enum of_gpio_flags flags;
	int gpio = of_get_gpio_flags(np, 0, &flags);

	if (gpio < 0) {
		if (gpio != -EPROBE_DEFER)
			dev_err(dev, "Failed to get gpio flags (%d)\n", gpio);
		return gpio;
	}

	pdata->gpio_nr = gpio;
	pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW);
	/* probe() takes care of map_name == NULL or allowed_protos == 0 */
	pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL);
	pdata->allowed_protos = 0;

	return 0;
}
/* DT match table; const per kernel convention — .of_match_table takes a
 * const pointer, and the table is never modified at runtime. */
static const struct of_device_id gpio_ir_recv_of_match[] = {
	{ .compatible = "gpio-ir-receiver", },
	{ },
};
MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
#else /* !CONFIG_OF */
#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
#endif
/* Edge interrupt handler: sample the GPIO, translate the level into an
 * IR pulse/space edge and feed it to rc-core for decoding. */
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
	struct gpio_rc_dev *gpio_dev = dev_id;
	/* BUGFIX: was 'unsigned int gval', which made the 'gval < 0'
	 * error check below always false; gpio_get_value() returns int. */
	int gval;
	int rc = 0;
	enum raw_event_type type = IR_SPACE;

	if (gpio_dev->can_sleep)
		gval = gpio_get_value_cansleep(gpio_dev->gpio_nr);
	else
		gval = gpio_get_value(gpio_dev->gpio_nr);

	if (gval < 0)
		goto err_get_value;

	if (gpio_dev->active_low)
		gval = !gval;

	if (gval == 1)
		type = IR_PULSE;

	rc = ir_raw_event_store_edge(gpio_dev->rcdev, type);
	if (rc < 0)
		goto err_get_value;

	ir_raw_event_handle(gpio_dev->rcdev);

err_get_value:
	return IRQ_HANDLED;
}
static int gpio_ir_recv_probe(struct platform_device *pdev)
{
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
const struct gpio_ir_recv_platform_data *pdata =
pdev->dev.platform_data;
int rc;
if (pdev->dev.of_node) {
struct gpio_ir_recv_platform_data *dtpdata =
devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
if (!dtpdata)
return -ENOMEM;
rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
if (rc)
return rc;
pdata = dtpdata;
}
if (!pdata)
return -EINVAL;
if (pdata->gpio_nr < 0)
return -EINVAL;
gpio_dev = kzalloc(sizeof(struct gpio_rc_dev), GFP_KERNEL);
if (!gpio_dev)
return -ENOMEM;
rcdev = rc_allocate_device();
if (!rcdev) {
rc = -ENOMEM;
goto err_allocate_device;
}
rcdev->priv = gpio_dev;
rcdev->driver_type = RC_DRIVER_IR_RAW;
rcdev->input_name = GPIO_IR_DEVICE_NAME;
rcdev->input_phys = GPIO_IR_DEVICE_NAME "/input0";
rcdev->input_id.bustype = BUS_HOST;
rcdev->input_id.vendor = 0x0001;
rcdev->input_id.product = 0x0001;
rcdev->input_id.version = 0x0100;
rcdev->dev.parent = &pdev->dev;
rcdev->driver_name = GPIO_IR_DRIVER_NAME;
<<<<<<< HEAD
rcdev->map_name = RC_MAP_SAMSUNG_NECX;
=======
if (pdata->allowed_protos)
rcdev->allowed_protos = pdata->allowed_protos;
else
rcdev->allowed_protos = RC_BIT_ALL;
rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
>>>>>>> common/android-3.10.y
gpio_dev->rcdev = rcdev;
gpio_dev->gpio_nr = pdata->gpio_nr;
gpio_dev->active_low = pdata->active_low;
rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv");
if (rc < 0)
goto err_gpio_request;
gpio_dev->can_sleep = gpio_cansleep(pdata->gpio_nr);
rc = gpio_direction_input(pdata->gpio_nr);
if (rc < 0)
goto err_gpio_direction_input;
rc = rc_register_device(rcdev);
if (rc < 0) {
dev_err(&pdev->dev, "failed to register rc device\n");
goto err_register_rc_device;
}
platform_set_drvdata(pdev, gpio_dev);
rc = request_any_context_irq(gpio_to_irq(pdata->gpio_nr),
gpio_ir_recv_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"gpio-ir-recv-irq", gpio_dev);
if (rc < 0)
goto err_request_irq;
device_init_wakeup(&pdev->dev, pdata->can_wakeup);
return 0;
err_request_irq:
platform_set_drvdata(pdev, NULL);
rc_unregister_device(rcdev);
rcdev = NULL;
err_register_rc_device:
err_gpio_direction_input:
gpio_free(pdata->gpio_nr);
err_gpio_request:
rc_free_device(rcdev);
err_allocate_device:
kfree(gpio_dev);
return rc;
}
/* Remove: release the IRQ, the rc-core device and the GPIO, then free
 * the per-device state. */
static int gpio_ir_recv_remove(struct platform_device *pdev)
{
	struct gpio_rc_dev *gdev = platform_get_drvdata(pdev);

	free_irq(gpio_to_irq(gdev->gpio_nr), gdev);
	platform_set_drvdata(pdev, NULL);
	rc_unregister_device(gdev->rcdev);
	gpio_free(gdev->gpio_nr);
	kfree(gdev);
	return 0;
}
#ifdef CONFIG_PM
/* Suspend: arm the IRQ for wakeup if configured as a wake source,
 * otherwise silence it for the duration of the sleep. */
static int gpio_ir_recv_suspend(struct device *dev)
{
	struct gpio_rc_dev *gdev =
		platform_get_drvdata(to_platform_device(dev));

	if (device_may_wakeup(dev))
		enable_irq_wake(gpio_to_irq(gdev->gpio_nr));
	else
		disable_irq(gpio_to_irq(gdev->gpio_nr));

	return 0;
}
/* Resume: undo whatever gpio_ir_recv_suspend() did to the IRQ. */
static int gpio_ir_recv_resume(struct device *dev)
{
	struct gpio_rc_dev *gdev =
		platform_get_drvdata(to_platform_device(dev));

	if (device_may_wakeup(dev))
		disable_irq_wake(gpio_to_irq(gdev->gpio_nr));
	else
		enable_irq(gpio_to_irq(gdev->gpio_nr));

	return 0;
}
/* PM callbacks (only compiled in with CONFIG_PM). */
static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
	.suspend        = gpio_ir_recv_suspend,
	.resume         = gpio_ir_recv_resume,
};
#endif
/* Platform driver glue; of_match_ptr() compiles to NULL without CONFIG_OF. */
static struct platform_driver gpio_ir_recv_driver = {
	.probe  = gpio_ir_recv_probe,
	.remove = gpio_ir_recv_remove,
	.driver = {
		.name   = GPIO_IR_DRIVER_NAME,
		.owner  = THIS_MODULE,
		.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
		.pm	= &gpio_ir_recv_pm_ops,
#endif
	},
};
module_platform_driver(gpio_ir_recv_driver);
MODULE_DESCRIPTION("GPIO IR Receiver driver");
MODULE_LICENSE("GPL v2");
| javelinanddart/android_kernel_3.10_ville | drivers/media/rc/gpio-ir-recv.c | C | gpl-2.0 | 6,544 |
/*
* Network block device - make block devices work over TCP
*
* Note that you can not swap over this thing, yet. Seems to work but
* deadlocks sometimes - you can not swap over TCP in general.
*
* Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
* Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
*
* This file is released under GPLv2 or later.
*
* (part of code stolen from loop.c)
*/
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
#define NBD_MAGIC 0x68797548
#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL 0x0004
#define DBG_INIT 0x0010
#define DBG_EXIT 0x0020
#define DBG_BLKDEV 0x0100
#define DBG_RX 0x0200
#define DBG_TX 0x0400
static unsigned int debugflags;
#endif /* NDEBUG */
static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
* Use just one lock (or at most 1 per NIC). Two arguments for this:
* 1. Each NIC is essentially a synchronization point for all servers
* accessed through that NIC so there's no need to have more locks
* than NICs anyway.
* 2. More locks lead to more "Dirty cache line bouncing" which will slow
* down each lock to the point where they're actually slower than just
* a single lock.
* Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
*/
static DEFINE_SPINLOCK(nbd_lock);
#ifndef NDEBUG
/* Map an nbd ioctl command number to a human-readable name for debug
 * output; "unknown" for anything unrecognized. */
static const char *ioctl_cmd_to_ascii(int cmd)
{
	const char *name = "unknown";

	switch (cmd) {
	case NBD_SET_SOCK:
		name = "set-sock"; break;
	case NBD_SET_BLKSIZE:
		name = "set-blksize"; break;
	case NBD_SET_SIZE:
		name = "set-size"; break;
	case NBD_SET_TIMEOUT:
		name = "set-timeout"; break;
	case NBD_SET_FLAGS:
		name = "set-flags"; break;
	case NBD_DO_IT:
		name = "do-it"; break;
	case NBD_CLEAR_SOCK:
		name = "clear-sock"; break;
	case NBD_CLEAR_QUE:
		name = "clear-que"; break;
	case NBD_PRINT_DEBUG:
		name = "print-debug"; break;
	case NBD_SET_SIZE_BLOCKS:
		name = "set-size-blocks"; break;
	case NBD_DISCONNECT:
		name = "disconnect"; break;
	case BLKROSET:
		name = "set-read-only"; break;
	case BLKFLSBUF:
		name = "flush-buffer-cache"; break;
	}

	return name;
}
/* Map an NBD protocol command to a human-readable name for debug
 * output; "invalid" for anything unrecognized. */
static const char *nbdcmd_to_ascii(int cmd)
{
	const char *name = "invalid";

	switch (cmd) {
	case NBD_CMD_READ:
		name = "read"; break;
	case NBD_CMD_WRITE:
		name = "write"; break;
	case NBD_CMD_DISC:
		name = "disconnect"; break;
	case NBD_CMD_FLUSH:
		name = "flush"; break;
	case NBD_CMD_TRIM:
		name = "trim/discard"; break;
	}

	return name;
}
#endif /* NDEBUG */
/* Complete a block request, translating req->errors into -EIO. The
 * queue lock must be held around __blk_end_request_all(). */
static void nbd_end_request(struct request *req)
{
	struct request_queue *q = req->q;
	int error = req->errors ? -EIO : 0;
	unsigned long flags;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/* Forcibly shut down the device's socket so every blocked listener
 * errors out; optionally takes tx_lock for the caller.
 *
 * FIXME: duplicated from sys_shutdown() — a more generic interface than
 * calling socket ops directly here would be preferable. */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	if (lock)
		mutex_lock(&nbd->tx_lock);

	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
	}

	if (lock)
		mutex_unlock(&nbd->tx_lock);
}
/* Transmit-timeout timer callback: the transmitting task hung, so kill
 * it with SIGKILL (which sock_xmit() leaves unblocked for this purpose). */
static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);

	force_sig(SIGKILL, task);
}
/*
* Send or receive packet.
*/
/* Send (send != 0) or receive a full buffer of 'size' bytes over the
 * device's socket, looping over partial transfers. While transferring,
 * all signals except SIGKILL are blocked (so only nbd_xmit_timeout()'s
 * kill interrupts us), and PF_MEMALLOC is set so socket allocations can
 * dip into reserves (nbd may sit under memory writeback).
 * Returns the last kernel_sendmsg/recvmsg result (> 0 on success),
 * -EINTR if killed, -EPIPE on short read, -EINVAL if no socket. */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			/* Arm a watchdog that SIGKILLs us if the send hangs */
			if (nbd->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + nbd->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (nbd->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(nbd, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}
/* Map one bio_vec page and send its payload over the socket. */
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
				 int flags)
{
	void *page_addr = kmap(bvec->bv_page);
	int ret;

	ret = sock_xmit(nbd, 1, page_addr + bvec->bv_offset,
			bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return ret;
}
/* always call with the tx_lock held */
/*
 * Serialize @req into an on-wire struct nbd_request and transmit it,
 * followed by the data pages for writes.  The request pointer itself is
 * copied into request.handle as the 8-byte cookie used by
 * nbd_read_stat() to match the reply.  Returns 0 on success, -EIO on
 * any transmission failure.
 */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));

	if (nbd_cmd(req) == NBD_CMD_FLUSH) {
		/* Other values are reserved for FLUSH requests. */
		request.from = 0;
		request.len = 0;
	} else {
		/* Byte offset = sector << 9, in network byte order. */
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
			nbd->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		goto error_out;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			/* MSG_MORE on every segment except the last. */
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
					nbd->disk->disk_name, req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				goto error_out;
			}
		}
	}
	return 0;

error_out:
	return -EIO;
}
/*
 * Locate and dequeue the in-flight request matching a reply's handle.
 *
 * First waits until @xreq is no longer nbd->active_req (i.e.
 * nbd_handle_req() has finished sending it and parked it on
 * queue_head), then removes it from the queue.
 *
 * Returns the request on success, ERR_PTR(-ENOENT) if it is not queued,
 * or an ERR_PTR of a negative error if the wait was interrupted.
 */
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		goto out;

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	err = -ENOENT;

out:
	return ERR_PTR(err);
}
/* Map one bio_vec page and fill it from the socket (MSG_WAITALL). */
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	void *page_addr = kmap(bvec->bv_page);
	int ret;

	ret = sock_xmit(nbd, 0, page_addr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return ret;
}
/* NULL returned = something went wrong, inform userspace */
/*
 * Receive one reply header from the server, match it to the in-flight
 * request via the pointer cookie in reply.handle, and for READs pull the
 * data pages off the socket.  Server-side or receive errors bump
 * req->errors so nbd_end_request() completes the request with EIO.
 * Hard protocol errors store the code in nbd->harderror and return NULL,
 * terminating the nbd_do_it() loop.
 */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	/* The cookie stored by nbd_send_req() is the request pointer. */
	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			nbd->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
				nbd->disk->disk_name, req, bvec.bv_len);
		}
	}
	return req;

harderror:
	nbd->harderror = result;
	return NULL;
}
static ssize_t pid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%ld\n",
(long) ((struct nbd_device *)disk->private_data)->pid);
}
/*
 * Read-only sysfs attribute "pid" on the disk device; created by
 * nbd_do_it() while a client is attached, removed when it exits.
 */
static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
/*
 * Main receive loop, executed in the context of the userspace client
 * task via the NBD_DO_IT ioctl.  Publishes the task's pid through the
 * "pid" sysfs attribute, then completes replies until nbd_read_stat()
 * returns NULL (hard error or disconnect).
 */
static int nbd_do_it(struct nbd_device *nbd)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/* Let the socket allocate from reserves to avoid reclaim deadlock. */
	sk_set_memalloc(nbd->sock->sk);
	nbd->pid = task_pid_nr(current);
	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		nbd->pid = 0;
		return ret;
	}

	while ((req = nbd_read_stat(nbd)) != NULL)
		nbd_end_request(req);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
	nbd->pid = 0;
	return 0;
}
/*
 * Fail every request still sitting on queue_head (sent, awaiting reply)
 * or waiting_queue (not yet sent) with EIO.  Only called after the
 * socket has been torn down, so no locking is needed (see below).
 */
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}
}
/*
 * Translate one block-layer request into an NBD command and send it.
 * Runs in the nbd_thread kthread.  On success the request is parked on
 * queue_head until nbd_read_stat() matches the reply; any failure
 * completes the request immediately with EIO.
 */
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	nbd_cmd(req) = NBD_CMD_READ;
	if (rq_data_dir(req) == WRITE) {
		if ((req->cmd_flags & REQ_DISCARD)) {
			WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM));
			nbd_cmd(req) = NBD_CMD_TRIM;
		} else
			nbd_cmd(req) = NBD_CMD_WRITE;
		if (nbd->flags & NBD_FLAG_READ_ONLY) {
			dev_err(disk_to_dev(nbd->disk),
				"Write on read-only\n");
			goto error_out;
		}
	}

	if (req->cmd_flags & REQ_FLUSH) {
		/* A flush carries no data, so it must cover zero sectors. */
		BUG_ON(unlikely(blk_rq_sectors(req)));
		nbd_cmd(req) = NBD_CMD_FLUSH;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	/*
	 * active_req keeps nbd_find_request() from grabbing this request
	 * before it has been fully sent and queued.
	 */
	nbd->active_req = req;

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(req);
}
/*
 * Per-device sender kthread: pulls requests off waiting_queue (filled
 * by do_nbd_request()) and hands them to nbd_handle_req(), sleeping
 * while the queue is empty.  Keeps draining until both kthread_stop()
 * has been requested and the queue is empty.
 */
static int nbd_thread(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}
	return 0;
}
/*
* We always wait for result of write, for now. It would be nice to make it optional
* in future
* if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
* { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
*/
/*
 * Block-layer request function; entered with q->queue_lock held.
 * Drops the queue lock while working, queues each fetched request on
 * waiting_queue for the nbd_thread, and immediately fails requests
 * when the socket is gone.  The lock is re-taken before each
 * blk_fetch_request() and before returning, as the __releases/
 * __acquires annotations document.
 */
static void do_nbd_request(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;
	
	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
				req->rq_disk->disk_name, req, req->cmd_type);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}
/* Must be called with tx_lock held */
/*
 * Implementation of the nbd ioctls.  Caller (nbd_ioctl) holds tx_lock;
 * NBD_DISCONNECT and NBD_DO_IT temporarily drop it for operations that
 * must not run under the lock (fsync, the receive loop).  Returns 0 or
 * a negative errno; unknown commands yield -ENOTTY.
 */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		/* Ask the server to disconnect by sending NBD_CMD_DISC. */
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;

		/* Check again after getting mutex back.  */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = 1;

		nbd_send_req(nbd, &sreq);
		return 0;
	}
 
	case NBD_CLEAR_SOCK: {
		/* Drop the socket reference and fail everything queued. */
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		/* Attach the userspace-supplied socket fd to this device. */
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = 0; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		/* New block size; size is re-rounded down to a multiple. */
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		/* Device size in bytes, rounded down to the block size. */
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		/* Transmit timeout in seconds; 0 disables the xmit timer. */
		nbd->xmit_timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		/* Device size expressed as a count of blksize blocks. */
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		/*
		 * Run the receive loop in the caller's context; returns
		 * only when the connection ends, then tears everything
		 * down.  Drops tx_lock while the loop runs.
		 */
		struct task_struct *thread;
		struct socket *sock;
		int error;

		if (nbd->pid)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_create(nbd_thread, nbd, "%s",
					nbd->disk->disk_name);
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}
		wake_up_process(thread);
		error = nbd_do_it(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);
		if (error)
			return error;
		sock_shutdown(nbd, 0);
		sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		if (sock)
			sockfd_put(sock);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			ioctl_by_bdev(bdev, BLKRRPART, 0);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return nbd->harderror;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
/*
 * ioctl entry point: privilege check, then dispatch to __nbd_ioctl()
 * under tx_lock.
 */
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
		nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

	mutex_lock(&nbd->tx_lock);
	ret = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return ret;
}
/* Block device operations: only ioctl is implemented for nbd. */
static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
*/
/*
 * Module init: validate the max_part/nbds_max parameters, allocate the
 * nbd_dev array, create a gendisk + request queue per device, register
 * the block major, and finally initialize per-device state and add the
 * disks.
 *
 * Fix: the original allocated nbd_dev before validating part_shift and
 * nbds_max, so the two -EINVAL returns leaked the array.  Validation is
 * now done before the allocation.
 */
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	/* Validate before allocating so error paths cannot leak nbd_dev. */
	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		disk->queue->limits.max_discard_sectors = UINT_MAX;
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	/* Unwind the disks/queues created so far, then free the array. */
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}
/* Module exit: tear down every disk/queue, unregister the major, free. */
static void __exit nbd_cleanup(void)
{
	int i;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;

		nbd_dev[i].magic = 0;
		if (!disk)
			continue;
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		put_disk(disk);
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);
MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif
| MatiasBjorling/lightnvm-moved-to-OpenChannelSSD-Linux | drivers/block/nbd.c | C | gpl-2.0 | 22,865 |
/* Source for:
* Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
* drivers/input/touchscreen/cyttsp-i2c.c
*
* Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2, and only version 2, as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Cypress reserves the right to make changes without further notice
* to the materials described herein. Cypress does not assume any
* liability arising out of the application described herein.
*
* Contact Cypress Semiconductor at www.cypress.com
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <mach/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/byteorder/generic.h>
#include <linux/bitops.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif /* CONFIG_HAS_EARLYSUSPEND */
#include <linux/regulator/consumer.h>
#include <mach/vreg.h>
#include <linux/wakelock.h>
#include <linux/input/mt.h>
#define CYTTSP_DECLARE_GLOBALS
#include <linux/miscdevice.h>
#include <linux/cyttsp.h>
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
#include <linux/issp_defs.h>
#include <linux/issp_extern.h>
#endif
#include <linux/uaccess.h>
uint32_t cyttsp_tsdebug1;
module_param_named(tsdebug1, cyttsp_tsdebug1, uint, 0664);
#if defined(CONFIG_EF33_BOARD) || defined(CONFIG_EF34_BOARD)
#define FEATURE_TOUCH_KEY
#endif
/* -------------------------------------------------------------------- */
/* EF33S gpio & resolution & key area*/
/* -------------------------------------------------------------------- */
#define GPIO_TOUCH_RST 95
#define GPIO_TOUCH_CHG 61
#define GPIO_TOUCH_SDA 64
#define GPIO_TOUCH_SCL 65
#define GPIO_TOUCH_ID 93
#define IRQ_TOUCH_INT gpio_to_irq(GPIO_TOUCH_CHG)
/* -------------------------------------------------------------------- */
/* debug option */
/* -------------------------------------------------------------------- */
//#define TOUCH_DBG_ENABLE
#ifdef TOUCH_DBG_ENABLE
#define dbg(fmt, args...) printk("[TOUCH]" fmt, ##args)
#else
#define dbg(fmt, args...)
#endif
#define dbg_func_in() dbg("[FUNC_IN] %s\n", __func__)
#define dbg_func_out() dbg("[FUNC_OUT] %s\n", __func__)
#define dbg_line() dbg("[LINE] %d(%s)\n", __LINE__, __func__)
/* -------------------------------------------------------------------- */
#define FEATURE_SKY_PROCESS_CMD_KEY
#ifdef FEATURE_TOUCH_KEY
#define X_MAX 480
#define Y_MAX 800
#define NULL_KEY_AREA 840
#define MENU_KEY_MIN 40
#define MENU_KEY_MAX 140
#define HOME_KEY_MIN 210
#define HOME_KEY_MAX 280
#define BACK_KEY_MIN 360
#define BACK_KEY_MAX 460
#endif
#ifdef FEATURE_CYTTSP_HEARTBEAT
#define CYTTSP_HEARTBEAT_TIME 3
#endif
#define TOUCH_MAX_NUM 4 // 2
#define SENSOR_X 12
#define SENSOR_Y 20
#define MAX_NODE SENSOR_X*SENSOR_Y
#define CYTTSP_BASE_MIN 65
#define CYTTSP_BASE_MAX 135
#define CYTTSP_MUTEX_LOCK //ST_LIM
//----------------- Added --------------//
/* abs settings */
/* abs value offsets */
#define CY_NUM_ABS_VAL 5 /* number of abs values per setting */
#define CY_SIGNAL_OST 0
#define CY_MIN_OST 1
#define CY_MAX_OST 2
#define CY_FUZZ_OST 3
#define CY_FLAT_OST 4
/* axis signal offsets */
#define CY_NUM_ABS_SET 5 /* number of abs signal sets */
#define CY_ABS_X_OST 0
#define CY_ABS_Y_OST 1
#define CY_ABS_P_OST 2
#define CY_ABS_W_OST 3
#define CY_ABS_ID_OST 4
#define CY_IGNORE_VALUE 0xFFFF /* mark unused signals as ignore */
#define HI_TRACKID(reg) ((reg & 0xF0) >> 4)
#define LO_TRACKID(reg) ((reg & 0x0F) >> 0)
/* Touch structure */
/* Per-track-id touch state: active flag plus x/y/p/w/id abs values. */
struct cyttsp_trk{
	bool tch;               /* touch currently down for this track id */
	int abs[CY_NUM_ABS_SET];  /* indexed by CY_ABS_*_OST */
} ;

/* Number of touches reported in the previous frame. */
int prev_touches=0;
//----------------- --------------------------//
/* ****************************************************************************
* static value
* ************************************************************************** */
static struct cyttsp_gen3_xydata_t g_xy_data;
//static struct cyttsp_bootloader_data_t g_bl_data;
//static struct cyttsp_sysinfo_data_t g_sysinfo_data;
//static struct cyttsp_gen3_xydata_t g_wake_data;
static const struct i2c_device_id cyttsp_id[] = {
{ CYTTSP_I2C_NAME, 0 }, { }
};
/* CY TTSP I2C Driver private data */
struct cyttsp {
	struct i2c_client *client;       /* bound I2C device */
	struct input_dev *input;         /* input device for touch events */
	struct work_struct work;         /* IRQ bottom half (cyttsp_xy_worker) */
#ifdef FEATURE_CYTTSP_HEARTBEAT
	struct work_struct work2;        /* periodic heartbeat check */
#endif
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
	//struct work_struct work3; // N1037 20120312 for ICS
	struct delayed_work work3;       /* deferred firmware update */
#endif
	struct timer_list timer;
	struct mutex mutex;
#ifdef CYTTSP_MUTEX_LOCK
	struct mutex lock_mutex;         /* serializes raw I2C register access */
#endif
	char phys[32];
	struct cyttsp_platform_data *platform_data;
	u8 num_prev_touch;
	u16 active_track[CYTTSP_NUM_TRACK_ID];
	u16 prev_st_touch[CYTTSP_NUM_ST_TOUCH_ID];
	u16 prev_mt_touch[CYTTSP_NUM_MT_TOUCH_ID];
	u16 prev_mt_pos[CYTTSP_NUM_MT_TOUCH_ID][2];
	struct cyttsp_trk prv_trk[CYTTSP_NUM_TRACK_ID];
	atomic_t irq_enabled;            /* 1 while the touch IRQ is enabled */
	struct early_suspend early_suspend;
};
#ifdef FEATURE_CYTTSP_HEARTBEAT
static int start_heartbeat_timer = false;
#endif
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
struct cyttsp *cyttsp_data = NULL;
#endif
/* To check touch chip */
static int Touch_Dbg_Enable =0;
//static u16 prev_mt_pos[CYTTSP_NUM_TRACK_ID][2];
static struct wake_lock touch_wake_lock;
/* Power/update state of the controller (tracked in Touch_Status). */
typedef enum touch_status {
	TOUCH_POWERON,
	TOUCH_POWEROFF,
	TOUCH_UPDATE
} touch_status;

/* Charger attach state, set via the TOUCH_CHARGER_MODE ioctl. */
typedef enum
{
	BATTERY_PLUGGED_NONE = 0,
	BATTERY_PLUGGED_AC = 1,
	BATTERY_PLUGGED_USB = 2,
	BATTERY_PLUGGED_SLEEP = 10
} CHARGER_MODE;

/* Command codes accepted by ts_fops_ioctl() on the touch_fops device. */
typedef enum touch_ioctl {
	TOUCH_CHARGER_MODE = 701,
	TOUCH_IOCTL_READ_LASTKEY = 1001,
	TOUCH_IOCTL_DO_KEY,
	TOUCH_IOCTL_RELEASE_KEY,
	TOUCH_IOCTL_PRESS_TOUCH = 1007,
	TOUCH_IOCTL_RELEASE_TOUCH,
	TOUCH_IOCTL_SENSOR_X = 2005,
	TOUCH_IOCTL_SENSOR_Y,
	TOUCH_IOCTL_CHECK_BASE,
	TOUCH_IOCTL_READ_IC_VERSION,
	TOUCH_IOCTL_READ_FW_VERSION,
	TOUCH_IOCTL_START_UPDATE,
	TOUCH_IOCTL_SELF_TEST,
	TOUCH_IOCTL_SET_COLOR
} touch_ioctl;
static int Touch_Status =TOUCH_POWERON;
static int Touch_ChagerMode = BATTERY_PLUGGED_NONE;
static unsigned char bBlack=false;
struct cyttsp *ts_temp;
#if defined(CONFIG_APACHE_BOARD)
struct delayed_work work_delay_firmware;
#endif
MODULE_DEVICE_TABLE(i2c, cyttsp_id);
/* ****************************************************************************
* Prototypes for static functions
* ************************************************************************** */
static void cyttsp_xy_worker(struct work_struct *work);
#ifdef FEATURE_CYTTSP_HEARTBEAT
static void cyttsp_check_heartbeat(struct work_struct *work2);
#endif
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
void check_firmware_update(struct work_struct *work3);
#endif
static irqreturn_t cyttsp_irq(int irq, void *handle);
#if 0
static int cyttsp_inlist(u16 prev_track[], u8 curr_track_id, u8 *prev_loc, u8 num_touches);
static int cyttsp_next_avail_inlist(u16 curr_track[], u8 *new_loc, u8 num_touches);
#endif
#ifdef CYTTSP_INCLUDE_LOAD_FILE //[BIH] ICS port...
static int cyttsp_putbl(struct cyttsp *ts, int show, int show_status, int show_version, int show_cid);
#endif// CYTTSP_INCLUDE_LOAD_FILE //[BIH] ICS port...
static int __devinit cyttsp_probe(struct i2c_client *client, const struct i2c_device_id *id);
static int __devexit cyttsp_remove(struct i2c_client *client);
static int cyttsp_resume(struct i2c_client *client);
static int cyttsp_suspend(struct i2c_client *client, pm_message_t message);
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
static long ts_fops_ioctl(struct file *filp,unsigned int cmd, unsigned long arg);
static int ts_fops_open(struct inode *inode, struct file *filp);
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
static void cyttsp_early_suspend(struct early_suspend *handler);
static void cyttsp_late_resume(struct early_suspend *handler);
#endif /* CONFIG_HAS_EARLYSUSPEND */
static int pantech_auto_check(u8*);
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
static int firmware_update_by_user(void);
static int firmware_version_check(void);
#endif
static int pantech_selftest_check(void);
void Change_Active_Distance(u8 value); //test
/* ****************************************************************************
*
* ************************************************************************** */
/* I2C driver registration; suspend/resume are handled via early_suspend
 * (see cyttsp_early_suspend/cyttsp_late_resume), hence commented out. */
static struct i2c_driver cyttsp_driver = {
	.driver = {
		.name = CYTTSP_I2C_NAME,
		.owner = THIS_MODULE,
	},
	.probe = cyttsp_probe,
	.remove = __devexit_p(cyttsp_remove),
//	.suspend = cyttsp_suspend,
//	.resume = cyttsp_resume,
	.id_table = cyttsp_id,
};
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver");
MODULE_AUTHOR("Cypress");
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
struct cyttsp *sky_process_cmd_ts=NULL;
static struct file_operations ts_fops = {
.owner = THIS_MODULE,
.open = ts_fops_open,
// .release = ts_fops_close,
.unlocked_ioctl = ts_fops_ioctl,
};
static struct miscdevice touch_event = {
.minor = MISC_DYNAMIC_MINOR,
.name = "touch_fops",
.fops = &ts_fops,
};
/* open() on the touch_fops misc device: no per-open state is needed,
 * the driver operates on the global cyttsp_data. */
static int ts_fops_open(struct inode *inode, struct file *filp)
{
	return 0;
}
#if 1 //[BIH] ICS port... ioctl changed to unlocked_ioctl or compact_ioctl...
//static DEFINE_MUTEX(cyttsp_mutex);
/*
 * ioctl handler for the touch_fops misc device: key/touch injection,
 * sensor geometry queries, baseline check, firmware version/update,
 * self-test, charger-mode and panel-color notification.
 *
 * NOTE(review): this handler returns true (1) on success and false (0)
 * on copy_to_user failure rather than 0/-EFAULT as unlocked_ioctl
 * normally would — the vendor userspace presumably expects these
 * values, so they are left unchanged; confirm before "fixing".
 */
static long ts_fops_ioctl(struct file *filp,
		       unsigned int cmd, unsigned long arg)
#else
static int ts_fops_ioctl(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
#endif
{
	void __user *argp = (void __user *)arg;

	if(cyttsp_data ==NULL)
	{
		cyttsp_debug("Null Device\n");
		return 0;
	}

	cyttsp_debug("cmd = %d, argp = 0x%x\n", cmd, (unsigned int)argp);
//	printk("cmd = %d, argp = 0x%x\n", cmd, (unsigned int)argp);
//	mutex_lock(&cyttsp_mutex);

	switch (cmd)
	{
	case TOUCH_IOCTL_READ_LASTKEY:
		break;
	case TOUCH_IOCTL_DO_KEY:
		/* Inject a key press; 0x20a/0x20b are remapped to 0xe3/0xe4. */
		if ( (int)argp == 0x20a )
			input_report_key(cyttsp_data->input, 0xe3, 1);
		else if ( (int)argp == 0x20b )
			input_report_key(cyttsp_data->input, 0xe4, 1);
		else
			input_report_key(cyttsp_data->input, (int)argp, 1);
		input_sync(cyttsp_data->input);

		/* Hidden debug toggles: KEY_9 enables, KEY_8 disables. */
		if((int)argp == KEY_9)
		{
			printk("Enable Touch Debug!!\n");
			Touch_Dbg_Enable = true;
		}
		else if((int)argp == KEY_8)
		{
			printk("Disable Touch Debug!!\n");
			Touch_Dbg_Enable = false;
		}
		/*
		else if((int)argp == KEY_F2)
		{
			int ret = 0;
			printk("Start Touch Firmware update!!\n");
			ret = firmware_update_by_user();
		}
		*/
		break;
	case TOUCH_IOCTL_RELEASE_KEY:
		/* Inject the matching key release. */
		if ( (int)argp == 0x20a )
			input_report_key(cyttsp_data->input, 0xe3, 0);
		else if ( (int)argp == 0x20b )
			input_report_key(cyttsp_data->input, 0xe4, 0);
		else
			input_report_key(cyttsp_data->input, (int)argp, 0);
		input_sync(cyttsp_data->input);
		break;

	// +++ FEATURE_P_VZW_PS_STABILITY_AT_CMD
	case TOUCH_IOCTL_PRESS_TOUCH:
		{
			/* arg packs x in the low 16 bits, y in the high 16. */
			int touchX=arg&0x0000FFFF;
			int touchY= (arg >> 16) & 0x0000FFFF;

			input_report_abs(cyttsp_data->input, ABS_MT_TOOL_TYPE , 1);
			input_report_abs(cyttsp_data->input, ABS_MT_TOUCH_MAJOR, CYTTSP_TOUCH);
			input_report_abs(cyttsp_data->input, ABS_MT_WIDTH_MAJOR, CYTTSP_SMALL_TOOL_WIDTH);
			input_report_abs(cyttsp_data->input, ABS_MT_POSITION_X, touchX);
			input_report_abs(cyttsp_data->input, ABS_MT_POSITION_Y, touchY);
			CYTTSP_MT_SYNC(cyttsp_data->input);
			input_sync(cyttsp_data->input);
		}
		break;

	case TOUCH_IOCTL_RELEASE_TOUCH:
		{
			int touchX=arg&0x0000FFFF;
			int touchY= (arg >> 16) & 0x0000FFFF;

			input_report_abs(cyttsp_data->input, ABS_MT_TOOL_TYPE , 1);
			input_report_abs(cyttsp_data->input, ABS_MT_TOUCH_MAJOR, CYTTSP_NOTOUCH);
			input_report_abs(cyttsp_data->input, ABS_MT_WIDTH_MAJOR, CYTTSP_SMALL_TOOL_WIDTH);
			input_report_abs(cyttsp_data->input, ABS_MT_POSITION_X, touchX);
			input_report_abs(cyttsp_data->input, ABS_MT_POSITION_Y, touchY);
			CYTTSP_MT_SYNC(cyttsp_data->input);
			input_sync(cyttsp_data->input);
		}
		break;
	// ---

	case TOUCH_IOCTL_SENSOR_X:
		{
			int send_data;
			send_data = SENSOR_X;

			if (copy_to_user(argp, &send_data, sizeof(send_data)))
				return false;
		}
		break;

	case TOUCH_IOCTL_SENSOR_Y:
		{
			int send_data;
			send_data = SENSOR_Y;

			if (copy_to_user(argp, &send_data, sizeof(send_data)))
				return false;
		}
		break;

	case TOUCH_IOCTL_CHECK_BASE:
		{
			/* Baseline capacitance dump; IRQ is masked while reading. */
			u8 send_byte[MAX_NODE];
			//printk("TOUCH_IOCTL_CHECK_BASE!!\n");

			disable_irq_nosync(ts_temp->client->irq);
			pantech_auto_check(send_byte);
			enable_irq(ts_temp->client->irq);

			if (copy_to_user(argp, send_byte, MAX_NODE))
				return false;
		}
		break;

#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
	case TOUCH_IOCTL_READ_IC_VERSION:
		{
			int ret = 0;
			ret = firmware_version_check();
			if (copy_to_user(argp, &ret, sizeof(ret)))
				return false;
		}
		break;

	case TOUCH_IOCTL_READ_FW_VERSION:
		{
			/* Expected firmware version depends on panel color. */
			int ret =0;
			if(bBlack == false) // White Model etc..
				ret = CYTTPS_NONBLACK_FIRMWARE_VER_ID;
			else	// Black Model
				ret = CYTTPS_FIRMWARE_VER_ID;
			if (copy_to_user(argp, &ret, sizeof(ret)))
				return false;
		}
		break;

	case TOUCH_IOCTL_START_UPDATE:
		{
			int ret = 0;
			ret = firmware_update_by_user(); // if ret == 0 success, or not fail
			printk("TOUCH_IOCTL_START_UPDATE ret : %d\n", ret);
			if (copy_to_user(argp, &ret, sizeof(ret)))
				return false;
		}
		break;
#endif
	case TOUCH_CHARGER_MODE:
		printk("TOUCH_CHARGER_MODE Setting : %d\n", (int)arg);
		Touch_ChagerMode = arg;
		break;
	case TOUCH_IOCTL_SELF_TEST:
		{
			int ret = 0;
			ret = pantech_selftest_check();
			if (copy_to_user(argp, &ret, sizeof(ret)))
				return false;
		}
		break;
	case TOUCH_IOCTL_SET_COLOR:
		bBlack = arg;
		break;
	default:
		break;
	}
//	mutex_unlock(&cyttsp_mutex);
	return true;
}
#endif
void Change_Active_Distance(u8 value)
{
int rc = -1;
u8 byte_data;
struct cyttsp *ts = ts_temp;
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
rc = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_GEST_SET,sizeof(byte_data), &byte_data);
//printk("Chage_Active_Distance : %02x\n", byte_data);
byte_data = value;
rc = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_GEST_SET, sizeof(byte_data), &byte_data);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
return;
}
/* sysfs show for irq_enable: 1 if the touch IRQ is enabled, else 0. */
static ssize_t cyttsp_irq_status(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct i2c_client *i2c = container_of(dev,
					      struct i2c_client, dev);
	struct cyttsp *ts_data = i2c_get_clientdata(i2c);

	return sprintf(buf, "%u\n", atomic_read(&ts_data->irq_enabled));
}
/*
 * sysfs store for irq_enable: "0" disables the touch IRQ, "1" enables
 * it.  atomic_cmpxchg on ts->irq_enabled makes the toggle idempotent,
 * so repeated writes of the same value never unbalance
 * disable_irq_nosync()/enable_irq().  Returns the consumed size, or a
 * negative errno on bad input.
 */
static ssize_t cyttsp_irq_enable(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	struct i2c_client *client = container_of(dev,
						 struct i2c_client, dev);
	struct cyttsp *ts = i2c_get_clientdata(client);

	int err = 0;
	unsigned long value;
/*
	struct qtm_obj_message *msg;
*/

	/* At most one digit plus the trailing newline. */
	if (size > 2)
		return -EINVAL;

	err = strict_strtoul(buf, 10, &value);
	if (err != 0)
		return err;

	switch (value) {
	case 0:
		if (atomic_cmpxchg(&ts->irq_enabled, 1, 0)) {
			pr_info("touch irq disabled!\n");
			disable_irq_nosync(ts->client->irq);
		}
		err = size;
		break;
	case 1:
		if (!atomic_cmpxchg(&ts->irq_enabled, 0, 1)) {
			pr_info("touch irq enabled!\n");
/*
			msg = cyttsp_read_msg(ts);
			if (msg == NULL)
				pr_err("%s: Cannot read message\n", __func__);
*/
			enable_irq(ts->client->irq);
		}
		err = size;
		break;
	default:
		pr_info("cyttsp_irq_enable failed -> irq_enabled = %d\n",
			atomic_read(&ts->irq_enabled));
		err = -EINVAL;
		break;
	}

	return err;
}
/* sysfs attribute "irq_enable" (rw-rw-r--): show = cyttsp_irq_status, store = cyttsp_irq_enable */
static DEVICE_ATTR(irq_enable, 0664, cyttsp_irq_status, cyttsp_irq_enable);
/*
 * pantech_ctl_update() - low-level board control multiplexer used by the
 * ISSP firmware-update path and power management.
 *
 * @cmd:   one of the ISSP_IOCTL_* / ISSP_TEST_* opcodes.
 * @value: opcode-dependent argument (GPIO level, on/off flag, delay in us).
 *
 * Returns 1 on success for GPIO operations, a GPIO level for the read
 * opcodes, a regulator_* return code for the power opcodes, or -1 for an
 * unknown command.
 *
 * NOTE(review): the regulator_get() results are not checked with IS_ERR()
 * before use — confirm against the platform regulator setup.
 */
int pantech_ctl_update(int cmd, int value)
{
int rt = -1;
struct regulator *vreg_touch, *vreg_touch_temp;
switch(cmd)
{
/* reconfigure SCL as a plain GPIO output (value!=0) or input (value==0) */
case ISSP_IOCTL_SCLK_TO_GPIO:
if(value){
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SCL, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
}
else{
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SCL, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_DISABLE);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SCL, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
}
rt = 1;
break;
/* same as above for the SDA line */
case ISSP_IOCTL_DATA_TO_GPIO:
if(value){
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SDA, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
}
else{
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SDA, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_DISABLE);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SDA, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
}
rt = 1;
break;
/* bit-bang the clock / data lines during ISSP programming */
case ISSP_IOCTL_SCLK:
gpio_set_value(GPIO_TOUCH_SCL, value);
rt = 1;
break;
case ISSP_IOCTL_DATA:
gpio_set_value(GPIO_TOUCH_SDA, value);
rt = 1;
break;
case ISSP_IOCTL_RESET:
break;
/* switch the touch logic supply on/off (rail differs per board rev) */
case ISSP_IOCTL_POWER:
//printk("Touch Power: %d, cmd: %d\n", value, cmd);
#if EF33S_BDVER_GE(WS20) || EF34K_BDVER_GE(WS20)
vreg_touch = regulator_get(NULL, "8058_l11");
regulator_set_voltage(vreg_touch, 1900000, 1900000);
#else
vreg_touch = regulator_get(NULL, "8058_lvs0");
#endif
if(value)
rt = regulator_enable(vreg_touch);
else
rt = regulator_disable(vreg_touch);
regulator_put(vreg_touch);
break;
/* switch both the logic rail and the 3.0V analog rail together */
case ISSP_IOCTL_POWER_ALL:
//printk("Touch Power All: %d, cmd: %d\n", value, cmd);
vreg_touch_temp = regulator_get(NULL, "8058_l19");
#if EF33S_BDVER_GE(WS20) || EF34K_BDVER_GE(WS20)
vreg_touch = regulator_get(NULL, "8058_l11");
regulator_set_voltage(vreg_touch, 1900000, 1900000);
#else
vreg_touch = regulator_get(NULL, "8058_lvs0");
#endif
regulator_set_voltage(vreg_touch_temp, 3000000, 3000000);
if(value)
{
rt = regulator_enable(vreg_touch);
rt = regulator_enable(vreg_touch_temp);
}
else
{
rt = regulator_disable(vreg_touch);
rt = regulator_disable(vreg_touch_temp);
}
regulator_put(vreg_touch);
regulator_put(vreg_touch_temp);
break;
case ISSP_IOCTL_READ_DATA_PIN:
rt = gpio_get_value(GPIO_TOUCH_SDA);
break;
/* busy-wait for 'value' microseconds between programming pulses */
case ISSP_IOCTL_WAIT:
udelay(value);
break;
/* drive the interrupt/CHG pin as an output during programming */
case ISSP_IOCTL_INTR:
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_CHG, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
gpio_set_value(GPIO_TOUCH_CHG, value);
rt = 1;
break;
case ISSP_TEST_READ_SCL:
rt = gpio_get_value(GPIO_TOUCH_SCL);
break;
case ISSP_TEST_READ_SDA:
rt = gpio_get_value(GPIO_TOUCH_SDA);
break;
case ISSP_TEST_READ_RESET:
rt = gpio_get_value(GPIO_TOUCH_RST);
break;
/* restore the pins to their normal I2C/interrupt functions after update */
case ISSP_COMPLITED_UPDATA:
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_CHG, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SCL, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_DISABLE);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SDA, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_DISABLE);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SDA, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_SCL, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
rt = 1;
break;
default:
dbg("UNKNOWN CMD\n");
break;
}
return rt;
}
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
/*
 * check_firmware_update() - deferred-work handler that compares the touch
 * IC's reported firmware version against the version built into the driver
 * and reflashes via touch_update_main() when the chip is older.
 *
 * Runs with a wake lock held and the touch IRQ disabled; the heartbeat
 * timer is paused while the update is in progress. On I2C read failure the
 * controller is power-cycled once and the read retried.
 *
 * NOTE(review): byte_data is read from the chip; if every read attempt
 * fails it is used uninitialized in the dbg() below — confirm acceptable.
 */
void check_firmware_update(struct work_struct *work3)
{
int retry_cnt = 3;
u8 byte_data[4];
int rc = -1, check_update_pass = 0, curr_version =0;
struct cyttsp *ts = ts_temp;
// If phone enter a poweroff, Stop firmware update
if(Touch_Status >= TOUCH_POWEROFF)
return;
#ifdef FEATURE_CYTTSP_HEARTBEAT
start_heartbeat_timer = false;
#endif
wake_lock(&touch_wake_lock);
disable_irq(ts->client->irq);
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
/* first attempt to read the version/ID register */
do {
rc = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_READ_VER_ID,sizeof(byte_data), (u8 *)&byte_data);
udelay(2*1000);
} while ((rc < CYTTSP_OPERATIONAL) && --retry_cnt);
dbg("i2c communcation1 %s, byte_data = %d, %d, %d, %d\n", (rc < CYTTSP_OPERATIONAL) ? "FAIL" : "PASS",byte_data[0],byte_data[1],byte_data[2],byte_data[3]);
/* version 0 or an I2C failure: power-cycle the chip and retry the read */
if((int)byte_data[0] == 0 || rc < CYTTSP_OPERATIONAL)
{
dbg("Retry read firmware version!\n");
msleep(200);
pantech_ctl_update(ISSP_IOCTL_POWER, 0);
msleep(100);
pantech_ctl_update(ISSP_IOCTL_POWER, 1);
msleep(200);
retry_cnt = 3;
do {
rc = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_READ_VER_ID,sizeof(byte_data), (u8 *)&byte_data);
udelay(2*1000);
} while ((rc < CYTTSP_OPERATIONAL) && --retry_cnt);
}
pantech_ctl_update(ISSP_IOCTL_INTR,0);
#ifdef FEATURE_SKY_NONBLACK_FIRMWARE
/* odd/even version ID encodes the panel color variant */
if(rc >= CYTTSP_OPERATIONAL) // read success
bBlack = (int)byte_data[0] % 2;
if(bBlack == false) // White Model etc..
curr_version = CYTTPS_NONBLACK_FIRMWARE_VER_ID;
else // Black Model
curr_version = CYTTPS_FIRMWARE_VER_ID;
#else
curr_version = CYTTPS_FIRMWARE_VER_ID;
#endif
dbg("[Touch] Model Black Check: %d, Current Version: %d\n", bBlack, curr_version);
/* flash when the chip reports an older (non-zero) version, or when the
 * version could not be read at all */
if(((curr_version > byte_data[0]) && (byte_data[0] != 0)) || (rc < CYTTSP_OPERATIONAL))
{
retry_cnt = 5;
dbg("Start Firmware Update chip id: %d\n", byte_data[0]);
do{
check_update_pass = touch_update_main(bBlack);
udelay(2*1000);
}while((check_update_pass != 0) && --retry_cnt);
}
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
pantech_ctl_update(ISSP_IOCTL_INTR,1);
pantech_ctl_update(ISSP_COMPLITED_UPDATA,0);
/* if the update still failed, power-cycle and drop pending work */
if(check_update_pass != 0)
{
msleep(200);
pantech_ctl_update(ISSP_IOCTL_POWER, 0);
msleep(100);
pantech_ctl_update(ISSP_IOCTL_POWER, 1);
msleep(100);
cancel_work_sync(&ts->work);
}
dbg("check_firmware_update end!!(check_update_pass = %d)\n",check_update_pass);
enable_irq(ts->client->irq);
#ifdef FEATURE_CYTTSP_HEARTBEAT
start_heartbeat_timer = true;
#endif
wake_unlock(&touch_wake_lock);
return;
}
/*
 * firmware_version_check() - read the firmware version ID from the touch
 * IC and return it.
 *
 * If the panel is powered down, both rails are enabled temporarily (and
 * disabled again before returning). Returns the chip's version byte, or 1
 * when the register could not be read.
 *
 * NOTE(review): the "return 1" failure value is indistinguishable from a
 * chip genuinely reporting version 1 — confirm callers tolerate this.
 */
static int firmware_version_check(void)
{
int rc = -1, retry_cnt = 3;
u8 byte_data[4];
struct cyttsp *ts = ts_temp;
/* power the panel up if it is currently off so the read can succeed */
if(Touch_Status >= TOUCH_POWEROFF)
{
pantech_ctl_update(ISSP_IOCTL_POWER_ALL , 1);
pantech_ctl_update(ISSP_IOCTL_INTR, 1);
pantech_ctl_update(ISSP_COMPLITED_UPDATA, 0);
msleep(300);
// return false;
}
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
do {
rc = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_READ_VER_ID,sizeof(byte_data), (u8 *)&byte_data);
udelay(2*1000);
}
while ((rc < CYTTSP_OPERATIONAL) && --retry_cnt);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
if(rc < CYTTSP_OPERATIONAL)
{
printk("Can't read Touch Firmware Version\n");
return 1;
}
printk("Touch Firmware Update Version : %d, Current Version: %d\n" ,
CYTTPS_FIRMWARE_VER_ID,(int)byte_data[0]);
/* restore the previous powered-down state */
if(Touch_Status >= TOUCH_POWEROFF)
{
pantech_ctl_update(ISSP_IOCTL_POWER_ALL , 0);
}
return (int)byte_data[0]; // Need not
}
/*
 * firmware_set_charger_mode() - keep the touch IC's charger-mode register
 * in sync with the battery charging state.
 *
 * @mode: battery plug state; anything above BATTERY_PLUGGED_NONE means a
 *        charger is attached.
 *
 * Reads CYTTSP_REG_CHARGER_MODE, and writes the desired value only when
 * the register does not already hold it. Returns 0 on success, 1 when the
 * register could not be read.
 */
static int firmware_set_charger_mode(int mode)
{
struct cyttsp *ts = ts_temp;
int rc = -1, retry_cnt = 3;
u8 byte_data[4], target;
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
do {
rc = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_CHARGER_MODE,sizeof(byte_data), (u8 *)&byte_data);
udelay(2*1000);
} while ((rc < CYTTSP_OPERATIONAL) && --retry_cnt);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
if(rc < CYTTSP_OPERATIONAL)
{
printk("Can't read Touch Charger Mode\n");
return 1;
}
if(Touch_Dbg_Enable)
printk("Touch IC Charger Mode %02x\n" ,(int)byte_data[0]);
/* desired register value: charger-mode byte when plugged, 0x00 otherwise */
target = (mode > BATTERY_PLUGGED_NONE) ? CYTTPS_CHARGER_MODE : 0x00;
/* write only on change to avoid needless bus traffic */
if((int)byte_data[0] != (int)target)
{
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
rc = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_CHARGER_MODE, sizeof(target), &target);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
}
return 0;
}
/*
 * firmware_update_by_user() - force a firmware reflash, triggered from
 * user space (sysfs/ioctl path elsewhere in the driver).
 *
 * Powers the panel if needed, suspends the heartbeat timer, holds a wake
 * lock, and runs touch_update_main() with the IRQ disabled and the INTR
 * pin driven low. The controller is power-cycled afterwards regardless of
 * the update result. Returns touch_update_main()'s status (0 = success).
 */
static int firmware_update_by_user(void)
{
struct cyttsp *ts = ts_temp;
int check_update_pass = -1;
// If phone enter a poweroff, Stop firmware update
if(Touch_Status >= TOUCH_POWEROFF)
{
pantech_ctl_update(ISSP_IOCTL_POWER_ALL , 1);
msleep(300);
// return false;
}
#ifdef FEATURE_CYTTSP_HEARTBEAT
start_heartbeat_timer = false;
#endif
Touch_Status= TOUCH_UPDATE;
wake_lock(&touch_wake_lock);
/* Disable the IRQ so the interrupt pin's high state can be changed
 * (translated from the original Korean comment) */
disable_irq(ts->client->irq);
/* NOTE(review): original Korean comment — appears to say the INTR pin is
 * driven low because leaving it high leaks ~1.2V even with the 2.6V touch
 * rail off; confirm against the board schematics */
pantech_ctl_update(ISSP_IOCTL_INTR,0);
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
check_update_pass = touch_update_main(bBlack);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
pantech_ctl_update(ISSP_IOCTL_INTR,1);
pantech_ctl_update(ISSP_COMPLITED_UPDATA,0);
msleep(100);
/* power-cycle the controller so it boots the (possibly new) firmware */
pantech_ctl_update(ISSP_IOCTL_POWER, 0);
msleep(100);
pantech_ctl_update(ISSP_IOCTL_POWER, 1);
msleep(100);
cancel_work_sync(&ts->work);
enable_irq(ts->client->irq);
#ifdef FEATURE_CYTTSP_HEARTBEAT
start_heartbeat_timer = true;
#endif
wake_unlock(&touch_wake_lock);
Touch_Status= TOUCH_POWERON;
return check_update_pass;
}
#endif
/* The cyttsp_xy_worker function reads the XY coordinates and sends them to
* the input layer. It is scheduled from the interrupt (or timer).
*/
#ifdef FEATURE_TOUCH_KEY
/* Bit values tracking which capacitive key (below the screen) is pressed;
 * CYTTSP_NULL_KEY marks a touch in the dead zone between keys. */
#define CYTTSP_MENU_KEY 0x01
#define CYTTSP_BACK_KEY 0x02
#define CYTTSP_HOME_KEY 0x04
#define CYTTSP_NULL_KEY 0x08
/* currently pressed key bit, 0 when no key is down */
static int key_status = 0x00;
#endif
#if 0//def FEATURE_SKY_TOUCH_DELTA_DEBUG
static u16 pre_x_data;
static u16 pre_y_data;
static u16 delta_x;
static u16 delta_y;
#endif
#ifdef FEATURE_CYTTSP_HEARTBEAT
/*
 * cyttsp_check_heartbeat() - periodic watchdog work item.
 *
 * Reads the chip's heartbeat counter register; if the read fails or the
 * counter has not advanced since the previous check, the controller is
 * assumed hung and is power-cycled with the IRQ masked. Also re-syncs the
 * charger-mode register on every tick.
 *
 * Fix: old_heartbeat_data is now updated after each successful read.
 * Previously it was never assigned after a good read, so the stall
 * comparison was always against the initial 0xFF — a genuinely stuck
 * counter was never detected, and a counter that happened to reach 0xFF
 * caused a spurious reset.
 */
void cyttsp_check_heartbeat(struct work_struct *work2)
{
struct cyttsp *ts = container_of(work2,struct cyttsp,work2);
int retry_cnt = 3;
u8 new_heartbeart_data[4];
int rc = -1;
static u8 old_heartbeat_data = 0xFF;
memset((void*)new_heartbeart_data,0x00,sizeof(new_heartbeart_data));
if(start_heartbeat_timer == false)
return;
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
do {
/* Read Heartbeat Count */
rc = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_READ_HEARTBEAT,sizeof(new_heartbeart_data), (u8 *)&new_heartbeart_data);
}
while ((rc < CYTTSP_OPERATIONAL) && --retry_cnt);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
if(Touch_Dbg_Enable)
printk("##### Check Count = %s, byte_data = %d, %d, %d, %d\n", (rc < CYTTSP_OPERATIONAL) ? "FAIL" : "PASS",new_heartbeart_data[0],new_heartbeart_data[1],new_heartbeart_data[2],new_heartbeart_data[3]);
if(start_heartbeat_timer == false)
return;
if(rc < CYTTSP_OPERATIONAL || old_heartbeat_data == new_heartbeart_data[0])
{
/* I2C error or the heartbeat counter did not advance: power-cycle
 * the controller with the interrupt masked */
disable_irq(ts->client->irq);
pantech_ctl_update(ISSP_IOCTL_INTR,0);
pantech_ctl_update(ISSP_IOCTL_POWER,0);
msleep(200);
pantech_ctl_update(ISSP_IOCTL_INTR,1);
gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_CHG, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
enable_irq(ts->client->irq);
pantech_ctl_update(ISSP_IOCTL_POWER,1);
if(Touch_Dbg_Enable)
printk("HeartBeat Fail old_data = %d, new_data = %d",old_heartbeat_data, new_heartbeart_data[0]);
}
else
{
/* BUGFIX: remember the last successfully-read counter so the next
 * tick compares against it instead of the initial 0xFF */
old_heartbeat_data = new_heartbeart_data[0];
}
if(!start_heartbeat_timer)
old_heartbeat_data = 0xFF;
// Set Charger Mode
firmware_set_charger_mode(Touch_ChagerMode);
return;
}
#endif
/* per-slot "finger currently down" flag used by cyttsp_xy_worker */
static bool cur_touchflag[TOUCH_MAX_NUM];
/* per-track-id index into the current report, -1 when absent this frame */
int touch_mask[TOUCH_MAX_NUM];
/* latest coordinates/pressure per track id */
struct cyttsp_trk cur_trk[TOUCH_MAX_NUM];
/*
 * cyttsp_xy_worker() - main touch event work handler.
 *
 * Reads the XY data block from the chip over I2C, decodes up to four
 * touches (plus the capacitive key strip below Y_MAX when
 * FEATURE_TOUCH_KEY is set), and reports them to the input layer using
 * the MT slot protocol. Re-enables the IRQ (or re-arms the poll timer)
 * before returning.
 */
void cyttsp_xy_worker(struct work_struct *work)
{
struct cyttsp *ts = container_of(work,struct cyttsp,work);
int i;
int retval = 0;
u8 curr_touches = 0;
u8 id = 0;
// int t = 0;
// int num_sent = 0;
// int signal = 0;
int tch = 0;
#ifdef FEATURE_TOUCH_KEY
int key_relese = true;
#endif
/* -1 marks "track id not present in this frame" */
for (i=0;i<TOUCH_MAX_NUM;i++)
{
touch_mask[i] = -1;
}
if(Touch_Dbg_Enable)
printk("[TOUCH] Start cyttsp_xy_worker\n");
if(Touch_Status >= TOUCH_POWEROFF)
goto exit_xy_worker;
/* get event data from CYTTSP device */
i = CYTTSP_NUM_RETRY;
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
do {
retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_BASE,
sizeof(struct cyttsp_gen3_xydata_t), (u8 *)&g_xy_data);
}
while ((retval < CYTTSP_OPERATIONAL) && --i);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
/* return immediately on failure to read device on the i2c bus */
if (retval < CYTTSP_OPERATIONAL) {
cyttsp_debug("exit_xy_worker 1");
goto exit_xy_worker;
}
cyttsp_xdebug("TTSP worker start 2:\n");
if ((curr_touches = GET_NUM_TOUCHES(g_xy_data.tt_stat)) > CYTTSP_NUM_MT_TOUCH_ID) {
/* if the number of fingers on the touch surface is more than the maximum
 * then there will be no new track information even for the orginal
 * touches. Therefore, ignore this touch event.
 */
cyttsp_debug("exit_xy_worker 2");
goto exit_xy_worker;
}
else if (IS_LARGE_AREA(g_xy_data.tt_stat)==1) {
/* terminate all active tracks */
curr_touches = CYTTSP_NOTOUCH;
cyttsp_debug("Large object detected. Terminating active tracks\n");
}
#ifdef FEATURE_TOUCH_KEY
/* touches below Y_MAX land on the capacitive key strip: map the X
 * coordinate to MENU/HOME/BACK and report key events instead */
for(i=0; i<curr_touches; i++)
{
int x =0, y=0;
switch(i)
{
case 0:
x = be16_to_cpu(g_xy_data.x1);
y = be16_to_cpu(g_xy_data.y1);
break;
case 1:
x = be16_to_cpu(g_xy_data.x2);
y = be16_to_cpu(g_xy_data.y2);
break;
case 2:
x = be16_to_cpu(g_xy_data.x3);
y = be16_to_cpu(g_xy_data.y3);
break;
case 3:
x = be16_to_cpu(g_xy_data.x4);
y = be16_to_cpu(g_xy_data.y4);
break;
default:
break;
}
if(y > Y_MAX)
{
key_relese = false;
/* only transition from "no key" or the same key, so a slide
 * between keys does not fire multiple key-down events */
if(y < NULL_KEY_AREA && (!key_status || key_status == CYTTSP_NULL_KEY))
{
key_status = CYTTSP_NULL_KEY;
dbg("Down TOUCH NULL\n");
}
else if((MENU_KEY_MIN < x && x < MENU_KEY_MAX) && (!key_status || key_status == CYTTSP_MENU_KEY))
{
key_status = CYTTSP_MENU_KEY;
input_report_key(ts->input, KEY_MENU, CYTTSP_TOUCH);
dbg("Down TOUCH MENU\n");
input_sync(ts->input);
}
else if((HOME_KEY_MIN < x && x < HOME_KEY_MAX) && (!key_status || key_status == CYTTSP_HOME_KEY))
{
key_status = CYTTSP_HOME_KEY;
input_report_key(ts->input, KEY_HOMEPAGE, CYTTSP_TOUCH);
dbg("Down TOUCH HOME\n");
input_sync(ts->input);
}
else if((BACK_KEY_MIN < x && x < BACK_KEY_MAX) && (!key_status || key_status == CYTTSP_BACK_KEY))
{
key_status = CYTTSP_BACK_KEY;
input_report_key(ts->input, KEY_BACK, CYTTSP_TOUCH);
dbg("Down TOUCH BACK\n");
input_sync(ts->input);
}
else if(!key_status)
key_status = CYTTSP_NULL_KEY;
}
}
/* a finger left the key strip: release whichever key was down */
if(key_relese && (curr_touches < prev_touches) && key_status)
{
if(key_status == CYTTSP_MENU_KEY)
input_report_key(ts->input, KEY_MENU, CYTTSP_NOTOUCH);
if(key_status == CYTTSP_HOME_KEY)
input_report_key(ts->input, KEY_HOMEPAGE, CYTTSP_NOTOUCH);
if(key_status == CYTTSP_BACK_KEY)
input_report_key(ts->input, KEY_BACK, CYTTSP_NOTOUCH);
if(key_status != CYTTSP_NULL_KEY)
input_sync(ts->input);
dbg("Up Key: %02x\n", key_status);
key_status = 0;
}
#endif
/* send no events if there were no previous touches and no new touches */
if ((prev_touches == CYTTSP_NOTOUCH) &&
((curr_touches == CYTTSP_NOTOUCH) || (curr_touches > CYTTSP_NUM_MT_TOUCH_ID))) {
cyttsp_debug("exit_xy_worker 3");
goto exit_xy_worker;
}
cyttsp_debug("prev=%d curr=%d\n", prev_touches, curr_touches);
dbg("cur#: %d, tch : 0x%x\n", curr_touches,tch);
dbg("touch12_id : 0x%x, touch34_id : 0x%x\n", g_xy_data.touch12_id,g_xy_data.touch34_id);
/* extract xy_data for all currently reported touches; track ids for
 * touches 1/2 are packed in touch12_id, 3/4 in touch34_id */
for (tch = 0; tch < curr_touches; tch++)
{
if (tch < 2)
{
id = (tch & 0x01) ?
GET_TOUCH2_ID(g_xy_data.touch12_id) : //LO_TRACKID(*(ts->tch_map[tch].id)) :
GET_TOUCH1_ID(g_xy_data.touch12_id); //HI_TRACKID(*(ts->tch_map[tch].id));
// touch_mask[id] = tch;
if (tch == 0)
{
if(id < TOUCH_MAX_NUM){
cur_trk[id].tch = CYTTSP_TOUCH;
cur_trk[id].abs[CY_ABS_X_OST] = be16_to_cpu(g_xy_data.x1);
cur_trk[id].abs[CY_ABS_Y_OST] = be16_to_cpu(g_xy_data.y1);
cur_trk[id].abs[CY_ABS_P_OST] = g_xy_data.z1;
cur_trk[id].abs[CY_ABS_W_OST] = CYTTSP_SMALL_TOOL_WIDTH;
cur_trk[id].abs[CY_ABS_ID_OST] = id;
}else{
if(Touch_Dbg_Enable)
printk("over Max touch ID id#: %d !!!\n", id);
}
}
if (tch == 1)
{
if(id < TOUCH_MAX_NUM){
cur_trk[id].tch = CYTTSP_TOUCH;
cur_trk[id].abs[CY_ABS_X_OST] = be16_to_cpu(g_xy_data.x2);
cur_trk[id].abs[CY_ABS_Y_OST] = be16_to_cpu(g_xy_data.y2);
cur_trk[id].abs[CY_ABS_P_OST] = g_xy_data.z2;
cur_trk[id].abs[CY_ABS_W_OST] = CYTTSP_SMALL_TOOL_WIDTH;
cur_trk[id].abs[CY_ABS_ID_OST] = id;
}else{
if(Touch_Dbg_Enable)
printk("over Max touch ID id#: %d !!!\n", id);
}
}
}
else
{
id = (tch & 0x01) ?
GET_TOUCH4_ID(g_xy_data.touch34_id) : //LO_TRACKID(*(ts->tch_map[tch].id)) :
GET_TOUCH3_ID(g_xy_data.touch34_id); //HI_TRACKID(*(ts->tch_map[tch].id));
if (tch == 2)
{
if(id < TOUCH_MAX_NUM){
cur_trk[id].tch = CYTTSP_TOUCH;
cur_trk[id].abs[CY_ABS_X_OST] = be16_to_cpu(g_xy_data.x3);
cur_trk[id].abs[CY_ABS_Y_OST] = be16_to_cpu(g_xy_data.y3);
cur_trk[id].abs[CY_ABS_P_OST] = g_xy_data.z3;
cur_trk[id].abs[CY_ABS_W_OST] = CYTTSP_SMALL_TOOL_WIDTH;
cur_trk[id].abs[CY_ABS_ID_OST] = id;
}else{
if(Touch_Dbg_Enable)
printk("over Max touch ID id#: %d !!!\n", id);
}
}
if (tch == 3)
{
if(id < TOUCH_MAX_NUM){
cur_trk[id].tch = CYTTSP_TOUCH;
cur_trk[id].abs[CY_ABS_X_OST] = be16_to_cpu(g_xy_data.x4);
cur_trk[id].abs[CY_ABS_Y_OST] = be16_to_cpu(g_xy_data.y4);
cur_trk[id].abs[CY_ABS_P_OST] = g_xy_data.z4;
cur_trk[id].abs[CY_ABS_W_OST] = CYTTSP_SMALL_TOOL_WIDTH;
cur_trk[id].abs[CY_ABS_ID_OST] = id;
}else{
if(Touch_Dbg_Enable)
printk("over Max touch ID id#: %d !!!\n", id);
}
}
}
if(id < TOUCH_MAX_NUM){
touch_mask[id] = tch;
}else{
if(Touch_Dbg_Enable)
printk("over Max touch ID id#: %d !!!\n", id);
}
dbg("tch#: %d, ID: %d, Xpos: %d, Ypos: %d\n",
tch, id, cur_trk[id].abs[CY_ABS_X_OST], cur_trk[id].abs[CY_ABS_Y_OST]);
}
// Release Event
/* no touches at all: close every slot that was previously active */
if ( curr_touches == 0 )
{
dbg("Touch Released\n");
for (i=0; i < TOUCH_MAX_NUM; i++)
{
if (cur_touchflag[i] /*cur_trk[i].abs[CY_ABS_ID_OST] >= 0*/)
{
cur_trk[i].abs[CY_ABS_ID_OST] = -1;
cur_touchflag[i] = 0;
input_mt_slot(ts->input, i);
/* tracking id -1 signals slot release in the MT protocol */
input_report_abs(ts->input, ABS_MT_TRACKING_ID, cur_trk[i].abs[CY_ABS_ID_OST]);
dbg("Touch Released 1, I : %d\n",i);
}
}
}
// Finger Touched
else
{
dbg("Touch Pressed\n");
for (i=0; i<TOUCH_MAX_NUM; i++)
{
if ( touch_mask[i] < 0 ) // 1 case - the 1st finger : touched / the 2nd finger : released
{
#if 1
if ( cur_trk[i].abs[CY_ABS_ID_OST] >= 0 )
{
cur_touchflag[cur_trk[i].abs[CY_ABS_ID_OST]] = 0;
cur_trk[i].abs[CY_ABS_ID_OST] = -1;
input_mt_slot(ts->input, i);
input_report_abs(ts->input, ABS_MT_TRACKING_ID, cur_trk[i].abs[CY_ABS_ID_OST]);
dbg("Touch Pressed 1\n");
}
#endif
}
else
{
if(touch_mask[i] >= 0) // if finger is touched
{
input_mt_slot(ts->input, i);
/* first frame for this slot: announce the new contact */
if ( (cur_touchflag[cur_trk[i].abs[CY_ABS_ID_OST]] == 0) )
{
dbg("Touch Pressed 2-1 I:%d, touchflag : %d\n",i,cur_touchflag[i]);
cur_touchflag[cur_trk[i].abs[CY_ABS_ID_OST]] = 1;
//cur_trk[i].abs[CY_ABS_ID_OST] = input_mt_new_trkid(ts->input);
input_report_abs(ts->input, ABS_MT_TRACKING_ID, cur_trk[i].abs[CY_ABS_ID_OST]);
}
// Move Event
// input_report_abs(ts->input, ABS_MT_TRACKING_ID, cur_trk[i].abs[CY_ABS_ID_OST]);
input_report_abs(ts->input, ABS_MT_POSITION_X, cur_trk[i].abs[CY_ABS_X_OST]);
input_report_abs(ts->input, ABS_MT_POSITION_Y, cur_trk[i].abs[CY_ABS_Y_OST]);
input_report_abs(ts->input, ABS_MT_PRESSURE, cur_trk[i].abs[CY_ABS_P_OST]);
input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, 1);
input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, cur_trk[i].abs[CY_ABS_P_OST]);
dbg("Touch Pressed 2-2 I:%d, touchflag : %d\n",i,cur_touchflag[cur_trk[i].abs[CY_ABS_ID_OST]]);
}
}
}
}
input_report_key(ts->input, BTN_TOUCH, (curr_touches>0)? 1:0 );
input_sync(ts->input);
// update previous touch num
prev_touches = curr_touches;
goto exit_xy_worker;
/* dead code below: pre-MT-slot reporting path kept for reference */
#if 0
/* provide input event signaling for each active touch */
for (id = 0, num_sent = 0; id < CYTTSP_NUM_TRACK_ID; id++)
{
if (cur_trk[id].tch)
{
t = cur_trk[id].abs[CY_ABS_ID_OST];
/* send 0 based track id's */
t -= 1;
input_report_abs(ts->input, ABS_MT_TRACKING_ID, t);
input_report_abs(ts->input, ABS_MT_POSITION_X, cur_trk[id].abs[CY_ABS_X_OST]);
input_report_abs(ts->input, ABS_MT_POSITION_Y, cur_trk[id].abs[CY_ABS_Y_OST]);
input_report_abs(ts->input, ABS_MT_PRESSURE, cur_trk[id].abs[CY_ABS_P_OST]);
input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, cur_trk[id].abs[CY_ABS_W_OST]);
num_sent++;
input_mt_sync(ts->input);
ts->prv_trk[id] = cur_trk[id];
ts->prv_trk[id].abs[CY_ABS_ID_OST] = t;
#if 0
cyttsp_dbg(ts, CY_DBG_LVL_1, "%s: ID:%3d X:%3d Y:%3d " "Z:%3d W=%3d T=%3d\n", __func__, id,
cur_trk[id].abs[CY_ABS_X_OST],
cur_trk[id].abs[CY_ABS_Y_OST],
cur_trk[id].abs[CY_ABS_P_OST],
cur_trk[id].abs[CY_ABS_W_OST],
t);
#endif
}
else if ((ABS_MT_PRESSURE == ABS_MT_TOUCH_MAJOR) && ts->prv_trk[id].tch)
{
/*
 * pre-Gingerbread:
 * need to report last position with
 * and report that position with
 * no touch if the touch lifts off
 */
input_report_abs(ts->input, ABS_MT_TRACKING_ID, ts->prv_trk[id].abs[CY_ABS_ID_OST]);
input_report_abs(ts->input, ABS_MT_POSITION_X, ts->prv_trk[id].abs[CY_ABS_X_OST]);
input_report_abs(ts->input, ABS_MT_POSITION_Y, ts->prv_trk[id].abs[CY_ABS_Y_OST]);
input_report_abs(ts->input, ABS_MT_PRESSURE, CYTTSP_NOTOUCH);
input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, CYTTSP_NOTOUCH);
num_sent++;
input_mt_sync(ts->input);
ts->prv_trk[id].tch = CYTTSP_NOTOUCH;
#if 0
cyttsp_dbg(ts, CY_DBG_LVL_1,
"%s: ID:%3d X:%3d Y:%3d "
"Z:%3d W=%3d T=%3d liftoff\n",
__func__, ts->prv_trk[id].abs[CY_ABS_ID_OST],
ts->prv_trk[id].abs[CY_ABS_X_OST],
ts->prv_trk[id].abs[CY_ABS_Y_OST],
CYTTSP_NOTOUCH,
CYTTSP_NOTOUCH,
ts->prv_trk[id].abs[CY_ABS_ID_OST]);
#endif
}
}
if (num_sent == 0)
{
/* in case of 0-touch; all liftoff; Gingerbread+ */
input_mt_sync(ts->input);
}
input_sync(ts->input);
// update previous touch num
prev_touches = curr_touches;
goto exit_xy_worker;
#endif
exit_xy_worker:
/*if(cyttsp_disable_touch) {
cyttsp_debug("Not enabling touch\n");
}
else*/ { /* NOTE(review): original mojibake comment said the
 * cyttsp_disable_touch guard caused misbehavior on ICS and the code
 * was unused in the ICS original — hence the commented-out guard */
if(ts->client->irq == 0) {
/* restart event timer */
mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
}
else {
/* re-enable the interrupt after processing */
enable_irq(ts->client->irq);
}
}
return;
}
/* Dead code: legacy track-id matching helpers from the pre-MT-slot
 * reporting path, compiled out together with the #if 0 block above. */
#if 0
static int cyttsp_inlist(u16 prev_track[], u8 curr_track_id, u8 *prev_loc, u8 num_touches)
{
u8 id =0;
*prev_loc = CYTTSP_IGNORE_TOUCH;
cyttsp_xdebug("IN p[%d]=%d c=%d n=%d loc=%d\n", \
id, prev_track[id], curr_track_id, num_touches, *prev_loc);
for (id = 0, *prev_loc = CYTTSP_IGNORE_TOUCH;
(id < num_touches); id++) {
cyttsp_xdebug("p[%d]=%d c=%d n=%d loc=%d\n", \
id, prev_track[id], curr_track_id, num_touches, *prev_loc);
if (prev_track[id] == curr_track_id) {
*prev_loc = id;
break;
}
}
cyttsp_xdebug("OUT p[%d]=%d c=%d n=%d loc=%d\n", \
id, prev_track[id], curr_track_id, num_touches, *prev_loc);
return ((*prev_loc < CYTTSP_NUM_TRACK_ID) ? true : false);
}
static int cyttsp_next_avail_inlist(u16 curr_track[], u8 *new_loc, u8 num_touches)
{
u8 id;
for (id = 0, *new_loc = CYTTSP_IGNORE_TOUCH;
(id < num_touches); id++) {
if (curr_track[id] > CYTTSP_NUM_TRACK_ID) {
*new_loc = id;
break;
}
}
return ((*new_loc < CYTTSP_NUM_TRACK_ID) ? true : false);
}
#endif
/* Timer function used as dummy interrupt driver */
/*
 * cyttsp_timer() - kernel timer callback that stands in for the interrupt
 * line. With the heartbeat feature it schedules the watchdog work and
 * re-arms itself; otherwise it schedules the normal XY worker.
 */
static void cyttsp_timer(unsigned long handle)
{
struct cyttsp *tsc = (struct cyttsp *)handle;
cyttsp_xdebug("TTSP Device timer event\n");
#ifdef FEATURE_CYTTSP_HEARTBEAT
if (start_heartbeat_timer) {
/* run one heartbeat check, then re-arm for the next period */
schedule_work(&tsc->work2);
mod_timer(&tsc->timer, jiffies + CYTTSP_HEARTBEAT_TIME * HZ);
}
#else
/* schedule motion signal handling */
schedule_work(&tsc->work);
#endif
}
/* ************************************************************************
* ISR function. This function is general, initialized in drivers init
* function
* ************************************************************************ */
/*
 * cyttsp_irq() - hard interrupt handler. Masks the line and defers all
 * real work to cyttsp_xy_worker via the workqueue. Events arriving while
 * the device is powered off are acknowledged and dropped.
 */
static irqreturn_t cyttsp_irq(int irq, void *handle)
{
struct cyttsp *ts = handle;
cyttsp_xdebug("%s: Got IRQ\n", CYTTSP_I2C_NAME);
if (Touch_Status >= TOUCH_POWEROFF)
return IRQ_HANDLED;
/* mask until the worker has consumed this event */
disable_irq_nosync(ts->client->irq);
schedule_work(&ts->work);
return IRQ_HANDLED;
}
#ifdef CYTTSP_INCLUDE_LOAD_FILE //[BIH] ICS port...
/* ************************************************************************
* Probe initialization functions
* ************************************************************************ */
/*
 * cyttsp_putbl() - read the bootloader status block into g_bl_data and
 * optionally print parts of it.
 *
 * @show:         tag printed with the debug output; also gates the read —
 *                when 0 nothing is read or printed.
 * @show_status:  print file/status/error and bootloader version bytes.
 * @show_version: print TTSP/app id/app version bytes.
 * @show_cid:     print the customer-id bytes.
 *
 * The number of bytes fetched depends on how much of the structure the
 * caller wants (cid > version > status). Returns the i2c read status.
 *
 * Fix: removed the dead initializer of num_bytes — the old arithmetic
 * expression was unconditionally overwritten by the if/else chain below
 * and only misled readers about how the size was chosen.
 */
static int cyttsp_putbl(struct cyttsp *ts, int show, int show_status, int show_version, int show_cid)
{
int retval = CYTTSP_OPERATIONAL;
int num_bytes;
if (show_cid) {
num_bytes = sizeof(struct cyttsp_bootloader_data_t);
}
else if (show_version) {
num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 3;
}
else {
num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 9;
}
if (show) {
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_BASE,
num_bytes, (u8 *)&g_bl_data);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
if (show_status) {
cyttsp_debug("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \
show, \
g_bl_data.bl_file, g_bl_data.bl_status, g_bl_data.bl_error, \
g_bl_data.blver_hi, g_bl_data.blver_lo, \
g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo);
}
if (show_version) {
cyttsp_debug("BL%d: ttspver=0x%02X%02X appid=0x%02X%02X appver=0x%02X%02X\n", \
show, \
g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
g_bl_data.appid_hi, g_bl_data.appid_lo, \
g_bl_data.appver_hi, g_bl_data.appver_lo);
}
if (show_cid) {
cyttsp_debug("BL%d: cid=0x%02X%02X%02X\n", \
show, \
g_bl_data.cid_0, g_bl_data.cid_1, g_bl_data.cid_2);
}
mdelay(CYTTSP_DELAY_DFLT);
}
return retval;
}
#endif //CYTTSP_INCLUDE_LOAD_FILE //[BIH] ICS port...
#ifdef CYTTSP_INCLUDE_LOAD_FILE
/* Firmware-download transfer parameters: max single I2C message, write
 * retry count, bootloader page size in bytes, and pages per record. */
#define CYTTSP_MAX_I2C_LEN 256
#define CYTTSP_MAX_TRY 10
#define CYTTSP_BL_PAGE_SIZE 16
#define CYTTSP_BL_NUM_PAGES 5
/*
 * cyttsp_i2c_write_block_data() - raw i2c_master_send wrapper that
 * prepends the register/command byte to @values and retries up to
 * CYTTSP_MAX_TRY times until the full length+1 bytes are accepted.
 *
 * Returns the last i2c_master_send() result (length+1 on success).
 * @length is a u8, so the command byte plus payload always fits in the
 * CYTTSP_MAX_I2C_LEN (256) stack buffer.
 */
static int cyttsp_i2c_write_block_data(struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
int retval = CYTTSP_OPERATIONAL;
u8 dataray[CYTTSP_MAX_I2C_LEN];
u8 try;
dataray[0] = command;
if (length) {
memcpy(&dataray[1], values, length);
}
try = CYTTSP_MAX_TRY;
do {
retval = i2c_master_send(client, dataray, length+1);
mdelay(CYTTSP_DELAY_DFLT*2);
}
while ((retval != length+1) && try--);
return retval;
}
/*
 * cyttsp_i2c_write_block_data_chunks() - stream one firmware record to the
 * bootloader in CYTTSP_BL_PAGE_SIZE pieces.
 *
 * The first chunk of @values already carries the bootloader page offset in
 * its first byte; subsequent chunks get a synthesized offset byte
 * (page_size * block) stuffed in front. Stops early on an I2C error or
 * after CYTTSP_BL_NUM_PAGES blocks. Returns the last write status.
 *
 * NOTE(review): @command is accepted but unused — the register is always
 * CYTTSP_REG_BASE; confirm this matches the callers' intent.
 */
static int cyttsp_i2c_write_block_data_chunks(struct cyttsp *ts, u8 command,
u8 length, const u8 *values)
{
int retval = CYTTSP_OPERATIONAL;
int block = 1;
u8 dataray[CYTTSP_MAX_I2C_LEN];
/* first page already includes the bl page offset */
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
CYTTSP_BL_PAGE_SIZE+1, values);
mdelay(10);
values += CYTTSP_BL_PAGE_SIZE+1;
length -= CYTTSP_BL_PAGE_SIZE+1;
/* rem blocks require bl page offset stuffing */
while (length && (block < CYTTSP_BL_NUM_PAGES) && !(retval < CYTTSP_OPERATIONAL)) {
dataray[0] = CYTTSP_BL_PAGE_SIZE*block;
memcpy(&dataray[1], values,
length >= CYTTSP_BL_PAGE_SIZE ? CYTTSP_BL_PAGE_SIZE : length);
retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
length >= CYTTSP_BL_PAGE_SIZE ? CYTTSP_BL_PAGE_SIZE+1 : length+1, dataray);
mdelay(10);
values += CYTTSP_BL_PAGE_SIZE;
length = length >= CYTTSP_BL_PAGE_SIZE ? length - CYTTSP_BL_PAGE_SIZE : 0;
block++;
}
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
return retval;
}
/*
 * cyttsp_bootload_app() - download the compiled-in firmware image
 * (cyttsp_fw[]) to the TTSP bootloader.
 *
 * Sequence: soft-reset into bootloader mode, send the load-initiation
 * record, stream each CYTTSP_BL_WRITE_BLK record in page-sized chunks
 * (polling the bootloader status between records), send the trailing
 * records, then soft-reset again and dump the final bootloader status.
 * Returns the final cyttsp_putbl() status. Timing (the long mdelay
 * calls) follows the bootloader's requirements — do not shorten.
 */
static int cyttsp_bootload_app(struct cyttsp *ts)
{
int retval = CYTTSP_OPERATIONAL;
int i, tries;
u8 host_reg;
cyttsp_debug("load new firmware \n");
#ifdef CYTTSP_MUTEX_LOCK
mutex_lock(&ts->lock_mutex);
#endif
/* reset TTSP Device back to bootloader mode */
host_reg = CYTTSP_SOFT_RESET_MODE;
retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
sizeof(host_reg), &host_reg);
/* wait for TTSP Device to complete reset back to bootloader */
// mdelay(CYTTSP_DELAY_DFLT);
mdelay(1000);
cyttsp_putbl(ts,3, true, true, true);
cyttsp_debug("load file -- tts_ver=0x%02X%02X app_id=0x%02X%02X app_ver=0x%02X%02X\n", \
cyttsp_fw_tts_verh, cyttsp_fw_tts_verl, \
cyttsp_fw_app_idh, cyttsp_fw_app_idl, \
cyttsp_fw_app_verh, cyttsp_fw_app_verl);
/* download new TTSP Application to the Bootloader
 *
 */
if (!(retval < CYTTSP_OPERATIONAL)) {
i = 0;
/* send bootload initiation command */
if (cyttsp_fw[i].Command == CYTTSP_BL_INIT_LOAD) {
g_bl_data.bl_file = 0;
g_bl_data.bl_status = 0;
g_bl_data.bl_error = 0;
retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
cyttsp_fw[i].Length, cyttsp_fw[i].Block);
/* delay to allow bootloader to get ready for block writes */
i++;
tries = 0;
cyttsp_debug("wait init f=%02X, s=%02X, e=%02X t=%d\n",g_bl_data.bl_file,
g_bl_data.bl_status, g_bl_data.bl_error, tries);
/* poll until the bootloader reports ready (status 0x10/0x11) */
do {
mdelay(1000);
cyttsp_putbl(ts,4, true, false, false);
}
while (g_bl_data.bl_status != 0x10 &&
g_bl_data.bl_status != 0x11 &&
tries++ < 10);
/* send bootload firmware load blocks -
 * kernel limits transfers to I2C_SMBUS_BLOCK_MAX(32) bytes
 */
if (!(retval < CYTTSP_OPERATIONAL)) {
while (cyttsp_fw[i].Command == CYTTSP_BL_WRITE_BLK) {
retval = cyttsp_i2c_write_block_data_chunks(ts,
CYTTSP_REG_BASE,
cyttsp_fw[i].Length, cyttsp_fw[i].Block);
// if (cyttsp_fw[i].Address & 0x01) {
// mdelay(CYTTSP_DELAY_DNLOAD);
// }
// else {
// mdelay(CYTTSP_DELAY_DNLOAD);
// }
/* bootloader requires delay after odd block addresses */
mdelay(100);
cyttsp_debug("BL DNLD Rec=% 3d Len=% 3d Addr=%04X\n",
cyttsp_fw[i].Record, cyttsp_fw[i].Length,
cyttsp_fw[i].Address);
i++;
if (retval < CYTTSP_OPERATIONAL) {
cyttsp_debug("BL fail Rec=%3d retval=%d\n",cyttsp_fw[i-1].Record, retval);
break;
}
else {
/* reset TTSP I2C counter */
retval = cyttsp_i2c_write_block_data(ts->client,
CYTTSP_REG_BASE,
0, NULL);
mdelay(10);
/* set arg2 to non-0 to activate */
cyttsp_putbl(ts,5, true, false, false);
}
}
/* remaining records (terminate/verify) go out whole */
if (!(retval < CYTTSP_OPERATIONAL)) {
while (i < cyttsp_fw_records) {
retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
cyttsp_fw[i].Length, cyttsp_fw[i].Block);
i++;
tries = 0;
cyttsp_debug("wait init f=%02X, s=%02X, e=%02X t=%d\n",g_bl_data.bl_file,
g_bl_data.bl_status, g_bl_data.bl_error, tries);
do {
mdelay(1000);
cyttsp_putbl(ts,6, true, false, false);
}
while (g_bl_data.bl_status != 0x10 &&
g_bl_data.bl_status != 0x11 &&
tries++ < 10);
cyttsp_putbl(ts,7, true, false, false);
if (retval < CYTTSP_OPERATIONAL) {
break;
}
}
}
}
}
}
/* Do we need to reset TTSP Device back to bootloader mode?? */
/*
*/
host_reg = CYTTSP_SOFT_RESET_MODE;
retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
sizeof(host_reg), &host_reg);
/* wait for TTSP Device to complete reset back to bootloader */
/*
*/
mdelay(1000);
#ifdef CYTTSP_MUTEX_LOCK
mutex_unlock(&ts->lock_mutex);
#endif
/* set arg2 to non-0 to activate */
retval = cyttsp_putbl(ts, 8, true, true, true);
return retval;
}
#else
/* Dead code: no-op stub used when the driver is built without the
 * embedded firmware image (CYTTSP_INCLUDE_LOAD_FILE unset); currently
 * also compiled out entirely via #if 0. */
#if 0
static int cyttsp_bootload_app(struct cyttsp *ts)
{
cyttsp_debug("no-load new firmware \n");
return CYTTSP_OPERATIONAL;
}
#endif
#endif /* CYTTSP_INCLUDE_LOAD_FILE */
#if 0
/*
 * cyttsp_power_on - full power-up sequence (currently compiled out by #if 0):
 * soft-reset the chip, detect and exit the bootloader, optionally re-flash
 * the firmware when on-chip and built-in versions differ, program the
 * interval registers from platform data in SysInfo mode, then return to
 * operational mode and enable gesture setup.
 * Returns CYTTSP_OPERATIONAL (0) on success, negative on failure.
 */
static int cyttsp_power_on(struct cyttsp *ts)
{
	int retval = CYTTSP_OPERATIONAL;
	u8 host_reg;
	int tries;
	/* bootloader-exit command: file 0, exit opcode, 8-byte security key */
	static u8 bl_cmd[] = {
		CYTTSP_BL_FILE0, CYTTSP_BL_CMD, CYTTSP_BL_EXIT,
		CYTTSP_BL_KEY0, CYTTSP_BL_KEY1, CYTTSP_BL_KEY2,
		CYTTSP_BL_KEY3, CYTTSP_BL_KEY4, CYTTSP_BL_KEY5,
		CYTTSP_BL_KEY6, CYTTSP_BL_KEY7};
	cyttsp_debug("Power up \n");
	/* check if the TTSP device has a bootloader installed */
	host_reg = CYTTSP_SOFT_RESET_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
		sizeof(host_reg), &host_reg);
	tries = 0;
	/* poll up to ~10 s for the bootloader (or operate mode) to appear */
	do {
		mdelay(1000);
		/* set arg2 to non-0 to activate */
		retval = cyttsp_putbl(ts, 1, true, true, true);
		cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X R=%d\n", \
			101, \
			g_bl_data.bl_file, g_bl_data.bl_status, g_bl_data.bl_error, \
			g_bl_data.blver_hi, g_bl_data.blver_lo, \
			g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo,
			retval);
		cyttsp_info("BL%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \
			102, \
			g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
			g_bl_data.appid_hi, g_bl_data.appid_lo, \
			g_bl_data.appver_hi, g_bl_data.appver_lo);
		cyttsp_info("BL%d: c_id=%02X%02X%02X\n", \
			103, \
			g_bl_data.cid_0, g_bl_data.cid_1, g_bl_data.cid_2);
	}
	while (!(retval < CYTTSP_OPERATIONAL) &&
		!GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
		!(g_bl_data.bl_file == CYTTSP_OPERATE_MODE + CYTTSP_LOW_POWER_MODE) &&
		tries++ < 10);
	/* is bootloader missing? */
	if (!(retval < CYTTSP_OPERATIONAL)) {
		cyttsp_xdebug("Retval=%d Check if bootloader is missing...\n", retval);
		if (!GET_BOOTLOADERMODE(g_bl_data.bl_status)) {
			/* skip all bootloader and sys info and go straight to operational mode */
			if (!(retval < CYTTSP_OPERATIONAL)) {
				cyttsp_xdebug("Bootloader is missing (retval = %d)\n", retval);
				host_reg = CYTTSP_OPERATE_MODE/* + CYTTSP_LOW_POWER_MODE*/;
				retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
					sizeof(host_reg), &host_reg);
				/* wait for TTSP Device to complete switch to Operational mode */
				mdelay(1000);
				goto bypass;
			}
		}
	}
	/* take TTSP out of bootloader mode; go to TrueTouch operational mode */
	if (!(retval < CYTTSP_OPERATIONAL)) {
		cyttsp_xdebug1("exit bootloader; go operational\n");
		retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
			sizeof(bl_cmd), bl_cmd);
		tries = 0;
		/* poll until the status register no longer reports bootloader mode */
		do {
			mdelay(1000);
			cyttsp_putbl(ts,4, true, false, false);
			cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \
				104, \
				g_bl_data.bl_file, g_bl_data.bl_status, g_bl_data.bl_error, \
				g_bl_data.blver_hi, g_bl_data.blver_lo, \
				g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo);
		}
		while (GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
			tries++ < 10);
	}
	/* re-flash if any on-chip version field differs from the built-in
	 * firmware image, or if a forced load is requested */
	if (!(retval < CYTTSP_OPERATIONAL) &&
		cyttsp_app_load()) {
		mdelay(1000);
		if (CYTTSP_DIFF(g_bl_data.ttspver_hi, cyttsp_tts_verh()) ||
			CYTTSP_DIFF(g_bl_data.ttspver_lo, cyttsp_tts_verl()) ||
			CYTTSP_DIFF(g_bl_data.appid_hi, cyttsp_app_idh()) ||
			CYTTSP_DIFF(g_bl_data.appid_lo, cyttsp_app_idl()) ||
			CYTTSP_DIFF(g_bl_data.appver_hi, cyttsp_app_verh()) ||
			CYTTSP_DIFF(g_bl_data.appver_lo, cyttsp_app_verl()) ||
			CYTTSP_DIFF(g_bl_data.cid_0, cyttsp_cid_0()) ||
			CYTTSP_DIFF(g_bl_data.cid_1, cyttsp_cid_1()) ||
			CYTTSP_DIFF(g_bl_data.cid_2, cyttsp_cid_2()) ||
			cyttsp_force_fw_load()) {
			cyttsp_debug("blttsp=0x%02X%02X flttsp=0x%02X%02X force=%d\n", \
				g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
				cyttsp_tts_verh(), cyttsp_tts_verl(), cyttsp_force_fw_load());
			cyttsp_debug("blappid=0x%02X%02X flappid=0x%02X%02X\n", \
				g_bl_data.appid_hi, g_bl_data.appid_lo, \
				cyttsp_app_idh(), cyttsp_app_idl());
			cyttsp_debug("blappver=0x%02X%02X flappver=0x%02X%02X\n", \
				g_bl_data.appver_hi, g_bl_data.appver_lo, \
				cyttsp_app_verh(), cyttsp_app_verl());
			cyttsp_debug("blcid=0x%02X%02X%02X flcid=0x%02X%02X%02X\n", \
				g_bl_data.cid_0, g_bl_data.cid_1, g_bl_data.cid_2, \
				cyttsp_cid_0(), cyttsp_cid_1(), cyttsp_cid_2());
			/* enter bootloader to load new app into TTSP Device */
			retval = cyttsp_bootload_app(ts);
			/* take TTSP device out of bootloader mode; switch back to TrueTouch operational mode */
			if (!(retval < CYTTSP_OPERATIONAL)) {
				retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
					sizeof(bl_cmd), bl_cmd);
				/* wait for TTSP Device to complete switch to Operational mode */
				mdelay(1000);
			}
		}
	}
bypass:
	/* switch to System Information mode to read versions and set interval registers */
	if (!(retval < CYTTSP_OPERATIONAL)) {
		cyttsp_debug("switch to sysinfo mode \n");
		host_reg = CYTTSP_SYSINFO_MODE;
		retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
			sizeof(host_reg), &host_reg);
		/* wait for TTSP Device to complete switch to SysInfo mode */
		mdelay(1000);
		if (!(retval < CYTTSP_OPERATIONAL)) {
			retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_BASE,
				sizeof(struct cyttsp_sysinfo_data_t), (u8 *)&g_sysinfo_data);
			cyttsp_debug("SI2: hst_mode=0x%02X mfg_cmd=0x%02X mfg_stat=0x%02X\n", \
				g_sysinfo_data.hst_mode, g_sysinfo_data.mfg_cmd, \
				g_sysinfo_data.mfg_stat);
			cyttsp_debug("SI2: bl_ver=0x%02X%02X\n", \
				g_sysinfo_data.bl_verh, g_sysinfo_data.bl_verl);
			cyttsp_debug("SI2: sysinfo act_int=0x%02X tch_tmout=0x%02X lp_int=0x%02X\n", \
				g_sysinfo_data.act_intrvl, g_sysinfo_data.tch_tmout, \
				g_sysinfo_data.lp_intrvl);
			cyttsp_info("SI%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \
				102, \
				g_sysinfo_data.tts_verh, g_sysinfo_data.tts_verl, \
				g_sysinfo_data.app_idh, g_sysinfo_data.app_idl, \
				g_sysinfo_data.app_verh, g_sysinfo_data.app_verl);
			cyttsp_info("SI%d: c_id=%02X%02X%02X\n", \
				103, \
				g_sysinfo_data.cid[0], g_sysinfo_data.cid[1], g_sysinfo_data.cid[2]);
			/* only write interval registers when platform data differs
			 * from the chip defaults */
			if (!(retval < CYTTSP_OPERATIONAL) &&
				(CYTTSP_DIFF(ts->platform_data->act_intrvl, CYTTSP_ACT_INTRVL_DFLT) ||
				CYTTSP_DIFF(ts->platform_data->tch_tmout, CYTTSP_TCH_TMOUT_DFLT) ||
				CYTTSP_DIFF(ts->platform_data->lp_intrvl, CYTTSP_LP_INTRVL_DFLT))) {
				if (!(retval < CYTTSP_OPERATIONAL)) {
					u8 intrvl_ray[sizeof(ts->platform_data->act_intrvl) +
						sizeof(ts->platform_data->tch_tmout) +
						sizeof(ts->platform_data->lp_intrvl)];
					u8 i = 0;
					/* the three registers are contiguous from
					 * CYTTSP_REG_ACT_INTRVL, written in one block */
					intrvl_ray[i++] = ts->platform_data->act_intrvl;
					intrvl_ray[i++] = ts->platform_data->tch_tmout;
					intrvl_ray[i++] = ts->platform_data->lp_intrvl;
					cyttsp_debug("SI2: platinfo act_intrvl=0x%02X tch_tmout=0x%02X lp_intrvl=0x%02X\n", \
						ts->platform_data->act_intrvl, ts->platform_data->tch_tmout, \
						ts->platform_data->lp_intrvl);
					// set intrvl registers
					retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_ACT_INTRVL,
						sizeof(intrvl_ray), intrvl_ray);
					mdelay(CYTTSP_DELAY_SYSINFO);
				}
			}
		}
		/* switch back to Operational mode */
		cyttsp_debug("switch back to operational mode \n");
		if (!(retval < CYTTSP_OPERATIONAL)) {
			host_reg = CYTTSP_OPERATE_MODE/* + CYTTSP_LOW_POWER_MODE*/;
			retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE,
				sizeof(host_reg), &host_reg);
			/* wait for TTSP Device to complete switch to Operational mode */
			mdelay(1000);
		}
	}
	/* init gesture setup;
	 * this is required even if not using gestures
	 * in order to set the active distance */
	if (!(retval < CYTTSP_OPERATIONAL)) {
		u8 gesture_setup;
		cyttsp_debug("init gesture setup \n");
		gesture_setup = ts->platform_data->gest_set;
		retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_GEST_SET,
			sizeof(gesture_setup), &gesture_setup);
		mdelay(CYTTSP_DELAY_DFLT);
	}
	if (!(retval < CYTTSP_OPERATIONAL)) {
		ts->platform_data->power_state = CYTTSP_ACTIVE_STATE;
	}
	else {
		ts->platform_data->power_state = CYTTSP_IDLE_STATE;
	}
	cyttsp_debug("Retval=%d Power state is %s\n", retval, (ts->platform_data->power_state == CYTTSP_ACTIVE_STATE) ? "ACTIVE" : "IDLE");
	return retval;
}
#endif
/* cyttsp_initialize: Driver Initialization. This function takes
* care of the following tasks:
* 1. Create and register an input device with input layer
* 2. Take CYTTSP device out of bootloader mode; go operational
* 3. Start any timers/Work queues. */
/* cyttsp_initialize: Driver Initialization. This function takes
 * care of the following tasks:
 * 1. Create and register an input device with input layer
 * 2. Take CYTTSP device out of bootloader mode; go operational
 * 3. Start any timers/Work queues.
 * Returns CYTTSP_OPERATIONAL (0) on success or a negative errno. */
static int cyttsp_initialize(struct i2c_client *client, struct cyttsp *ts)
{
	struct input_dev *input_device;
	int error = 0;
	int retval = CYTTSP_OPERATIONAL;
	u8 id;
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
	cyttsp_data = ts;
#endif
	/* Create the input device and register it. */
	input_device = input_allocate_device();
	if (!input_device) {
		/* Fix: propagate the failure via retval; the original only set
		 * "error" and returned CYTTSP_OPERATIONAL (success). */
		retval = -ENOMEM;
		cyttsp_xdebug1("err input allocate device\n");
		goto error_free_device;
	}
	if (!client) {
		/* Fix: was "error = ~ENODEV" (bitwise NOT), which is not a
		 * valid negative errno; use -ENODEV and propagate it. */
		retval = -ENODEV;
		cyttsp_xdebug1("err client is Null\n");
		goto error_free_device;
	}
	if (!ts) {
		retval = -ENODEV;
		cyttsp_xdebug1("err context is Null\n");
		goto error_free_device;
	}
	ts->input = input_device;
	input_device->name = CYTTSP_I2C_NAME;
	input_device->phys = ts->phys;
	input_device->dev.parent = &client->dev;
	/* event types: sync, keys (touch + optional cmd keys), absolute axes */
	set_bit(EV_SYN, input_device->evbit);
	set_bit(EV_KEY, input_device->evbit);
	set_bit(EV_ABS, input_device->evbit);
	set_bit(BTN_TOUCH, input_device->keybit);
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
	/* keys injectable through the AT-command/test interface */
	set_bit(KEY_MENU, input_device->keybit);
	set_bit(KEY_BACK, input_device->keybit);
	set_bit(KEY_POWER, input_device->keybit);
	set_bit(KEY_HOMEPAGE, input_device->keybit);
	set_bit(KEY_SEARCH, input_device->keybit);
	set_bit(KEY_0, input_device->keybit);
	set_bit(KEY_1, input_device->keybit);
	set_bit(KEY_2, input_device->keybit);
	set_bit(KEY_3, input_device->keybit);
	set_bit(KEY_4, input_device->keybit);
	set_bit(KEY_5, input_device->keybit);
	set_bit(KEY_6, input_device->keybit);
	set_bit(KEY_7, input_device->keybit);
	set_bit(KEY_8, input_device->keybit);
	set_bit(KEY_9, input_device->keybit);
	set_bit(0xe3, input_device->keybit); /* '*' */
	set_bit(0xe4, input_device->keybit); /* '#' */
	set_bit(KEY_LEFTSHIFT, input_device->keybit);
	set_bit(KEY_RIGHTSHIFT, input_device->keybit);
	set_bit(KEY_LEFT, input_device->keybit);
	set_bit(KEY_RIGHT, input_device->keybit);
	set_bit(KEY_UP, input_device->keybit);
	set_bit(KEY_DOWN, input_device->keybit);
	set_bit(KEY_ENTER, input_device->keybit);
	set_bit(KEY_SEND, input_device->keybit);
	set_bit(KEY_END, input_device->keybit);
	set_bit(KEY_VOLUMEUP, input_device->keybit);
	set_bit(KEY_VOLUMEDOWN, input_device->keybit);
	set_bit(KEY_CLEAR, input_device->keybit);
	set_bit(KEY_CAMERA, input_device->keybit);
	set_bit(KEY_DELETE, input_device->keybit);
	set_bit(KEY_WWW, input_device->keybit);
#endif // FEATURE_SKY_PROCESS_CMD_KEY
	/* clear current touch tracking structures */
	memset(cur_trk, 0, sizeof(cur_trk));
	for (id = 0; id < TOUCH_MAX_NUM; id++)
	{
		cur_trk[id].abs[CY_ABS_ID_OST] = -1;
		cur_touchflag[id] = 0;
	}
	input_mt_init_slots(input_device, TOUCH_MAX_NUM);
	if (ts->platform_data->use_gestures) {
		set_bit(BTN_3, input_device->keybit);
	}
	/* axis ranges come from platform data (panel resolution) */
	input_set_abs_params(input_device, ABS_X, 0, ts->platform_data->maxx, 0, 0);
	input_set_abs_params(input_device, ABS_Y, 0, ts->platform_data->maxy, 0, 0);
	input_set_abs_params(input_device, ABS_TOOL_WIDTH, 0, CYTTSP_LARGE_TOOL_WIDTH, 0 ,0);
	input_set_abs_params(input_device, ABS_PRESSURE, 0, CYTTSP_MAXZ, 0, 0);
	input_set_abs_params(input_device, ABS_HAT0X, 0, ts->platform_data->maxx, 0, 0);
	input_set_abs_params(input_device, ABS_HAT0Y, 0, ts->platform_data->maxy, 0, 0);
	if (ts->platform_data->use_gestures) {
		input_set_abs_params(input_device, ABS_HAT1X, 0, CYTTSP_MAXZ, 0, 0);
		input_set_abs_params(input_device, ABS_HAT1Y, 0, CYTTSP_MAXZ, 0, 0);
	}
	if (ts->platform_data->use_mt) {
		input_set_abs_params(input_device, ABS_MT_POSITION_X, 0, ts->platform_data->maxx, 0, 0);
		input_set_abs_params(input_device, ABS_MT_POSITION_Y, 0, ts->platform_data->maxy, 0, 0);
		input_set_abs_params(input_device, ABS_MT_TOUCH_MAJOR, 0, CYTTSP_MAXZ, 0, 0);
		input_set_abs_params(input_device, ABS_MT_WIDTH_MAJOR, 0, CYTTSP_LARGE_TOOL_WIDTH, 0, 0);
		if (ts->platform_data->use_trk_id) {
			input_set_abs_params(input_device, ABS_MT_TRACKING_ID, 0, CYTTSP_NUM_TRACK_ID, 0, 0);
		}
	}
	input_set_abs_params(input_device, ABS_MT_TOOL_TYPE, 0, 1, 0, 0);
	cyttsp_info("%s: Register input device\n", CYTTSP_I2C_NAME);
	error = input_register_device(input_device);
	if (error) {
		cyttsp_alert("%s: Failed to register input device\n", CYTTSP_I2C_NAME);
		retval = error;
		goto error_free_device;
	}
	else
		cyttsp_info("%s: Register input device success...\n", CYTTSP_I2C_NAME);
	/* Prepare our worker structure prior to setting up the timer/ISR */
	INIT_WORK(&ts->work,cyttsp_xy_worker);
#ifdef FEATURE_CYTTSP_HEARTBEAT
	INIT_WORK(&ts->work2,cyttsp_check_heartbeat);
#endif
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
	INIT_DELAYED_WORK(&ts->work3,check_firmware_update);
#endif
	/* Power on the chip and make sure that I/Os are set as specified
	 * in the platform
	 */
#if 0	/* disabled at vendor (Cypress) request */
	retval = cyttsp_power_on(ts);
	if (retval < 0) {
		goto error_unregister_device;
	}
#endif
	/* Timer or Interrupt setup */
	if(ts->client->irq == 0) {
		cyttsp_info("Setting up timer\n");
		setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts);
		mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
	}
	else {
#ifdef FEATURE_CYTTSP_HEARTBEAT
		start_heartbeat_timer = true;
		setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts);
		/* first heartbeat check delayed (~60 s) to keep boot unaffected */
		mod_timer(&ts->timer, jiffies + CYTTSP_HEARTBEAT_TIME * 20 * HZ);
#endif
		cyttsp_info("Setting up interrupt\n");
		/* request_irq() will also call enable_irq() */
		error = request_irq (client->irq,cyttsp_irq,IRQF_TRIGGER_FALLING,
			client->dev.driver->name,ts);
		if (error) {
			cyttsp_alert("error: could not request irq\n");
			retval = error;
			/* Fix: no IRQ was acquired here, so do not free_irq();
			 * just unregister the input device. */
			goto error_unregister_device;
		}
	}
	atomic_set(&ts->irq_enabled, 1);
	retval = device_create_file(&ts->client->dev, &dev_attr_irq_enable);
	if (retval < CYTTSP_OPERATIONAL) {
		cyttsp_alert("File device creation failed: %d\n", retval);
		retval = -ENODEV;
		goto error_free_irq;
	}
	cyttsp_info("%s: Successful registration\n", CYTTSP_I2C_NAME);
	goto success;
error_free_irq:
	cyttsp_alert("Error: Failed to register IRQ handler\n");
	/* Fix: only free the IRQ when one was actually requested (the
	 * polling-timer path never calls request_irq()). */
	if (client->irq)
		free_irq(client->irq,ts);
error_unregister_device:
	/* Fix: once registered, the device must be unregistered (which also
	 * frees it) rather than freed directly with input_free_device(). */
	input_unregister_device(input_device);
	input_device = NULL;
error_free_device:
	if (input_device) {
		input_free_device(input_device);
	}
success:
	return retval;
}
/*
 * pantech_auto_check - sample the sensor array in the two factory test
 * modes and report, per node, whether the readings fall inside the
 * accepted [CYTTSP_BASE_MIN, CYTTSP_BASE_MAX] window in both modes.
 * return_byte[i] is 0 for a good node, 1 for a bad one.
 * Returns 0 on completion, -1 if the device is already powered off.
 */
static int pantech_auto_check(u8* return_byte)
{
	u8 host_reg, byte_data[4], prev_data=0xff, byte_node1[MAX_NODE], byte_node2[MAX_NODE], send_byte[MAX_NODE];
	int retval = CYTTSP_OPERATIONAL, retry_cnt = 100, i;
	struct cyttsp *ts = ts_temp;
	dbg("pantech_auto_check!! start\n");
	// If phone enter a poweroff, Stop firmware update
	if(Touch_Status >= TOUCH_POWEROFF)
		return -1;
#ifdef CYTTSP_MUTEX_LOCK
	mutex_lock(&ts->lock_mutex);
#endif
	// Enter Test Mode
	host_reg = CYTTSP_TEST_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE, sizeof(host_reg), &host_reg);
	msleep(100);
	// Read Raw counts or baseline
	byte_data[0] = 0x00;
	/* wait until the mode register changes, i.e. test mode took effect */
	do {
		/* Read Count */
		retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_MODE , sizeof(byte_data), (u8 *)&byte_data);
		msleep(10);
	}
	while (byte_data[0] == prev_data && --retry_cnt);
	prev_data = byte_data[0];
	/* NOTE(review): retry_cnt is carried over from the loop above without
	 * being reset; if it reached 0 there, --retry_cnt wraps negative and
	 * this loop can retry far longer than intended -- TODO confirm. */
	do {
		/* Read Count
		Must set a i2c.h I2C_SMBUS_BLOCK_MAX 32 -> I2C_SMBUS_BLOCK_MAX 256
		*/
		retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_SENSOR_BASE , sizeof(byte_node1), (u8 *)&byte_node1);
		msleep(10);
	}
	while (retval < CYTTSP_OPERATIONAL && --retry_cnt);
	// Read Raw counts or baseline
	host_reg = CYTTSP_T_TEST_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE, sizeof(host_reg), &host_reg);
	msleep(100);
	byte_data[0] = 0x00;
	retry_cnt = 100;
	/* same handshake for the second test mode */
	do {
		/* Read Count */
		retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_MODE , sizeof(byte_data), (u8 *)&byte_data);
		msleep(10);
	}
	while (byte_data[0] == prev_data && --retry_cnt);
	prev_data = byte_data[0];
	do {
		/* Read Count */
		retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_SENSOR_BASE , sizeof(byte_node2), (u8 *)&byte_node2);
		msleep(10);
	}
	while (retval < CYTTSP_OPERATIONAL && --retry_cnt);
	/* classify each node: 0 = inside the window in both modes, else 1 */
	for(i=0; i<MAX_NODE; i++)
	{
		if(byte_node1[i] >= CYTTSP_BASE_MIN && byte_node1[i] <= CYTTSP_BASE_MAX &&
			byte_node2[i] >= CYTTSP_BASE_MIN && byte_node2[i] <= CYTTSP_BASE_MAX)
			send_byte[i] = 0;
		else
			send_byte[i] = 1;
		// printk("Check Valid %d, byte_node1 %d, byte_node2 %d : %d\n", i , byte_node1[i], byte_node2[i], send_byte[i]);
	}
	// Retrun Operate Mode
	host_reg = CYTTSP_OPERATE_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE, sizeof(host_reg), &host_reg);
#ifdef CYTTSP_MUTEX_LOCK
	mutex_unlock(&ts->lock_mutex);
#endif
	msleep(100);
	dbg("pantech_auto_check!! end\n");
	/* copy the per-node verdicts to the caller's buffer */
	for(i=0;i<MAX_NODE;i++)
		return_byte[i]=send_byte[i];
	return 0;
}
/*
 * pantech_selftest_check - run the controller's built-in self test from
 * system-information mode and decode the two result bytes.
 * Returns 0 on pass, -1 if powered off, or the CYTTSP_* code of the first
 * failing sub-test (open/short/IDAC/baseline/...).
 */
static int pantech_selftest_check(void)
{
	u8 host_reg, byte_data[2];
	int retval = CYTTSP_OPERATIONAL;
	struct cyttsp *ts = ts_temp;
	printk("pantech_selftest_check!! start\n");
	// If phone enter a poweroff, Stop firmware update
	if(Touch_Status >= TOUCH_POWEROFF)
		return -1;
#ifdef CYTTSP_MUTEX_LOCK
	mutex_lock(&ts->lock_mutex);
#endif
	// Enter system information Mode
	host_reg = CYTTSP_SYSINFO_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE, sizeof(host_reg), &host_reg);
	msleep(100);
	// Start self test
	host_reg = 0x01;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_SELF_TEST, sizeof(host_reg), &host_reg);
	msleep(1000);
	// Read test result
	/* NOTE(review): retval is never checked here, so byte_data may be
	 * used uninitialized if this read fails -- TODO confirm and handle. */
	retval = i2c_smbus_read_i2c_block_data(ts->client, CYTTSP_REG_SELF_TEST , sizeof(byte_data), (u8 *)&byte_data);
	printk("0x18 test: %02x, 0x19 test: %02x\n", byte_data[0], byte_data[1]);
	// Retrun Operate Mode
	host_reg = CYTTSP_OPERATE_MODE;
	retval = i2c_smbus_write_i2c_block_data(ts->client, CYTTSP_REG_BASE, sizeof(host_reg), &host_reg);
#ifdef CYTTSP_MUTEX_LOCK
	mutex_unlock(&ts->lock_mutex);
#endif
	msleep(100);
	printk("pantech_selftest_check!! end\n");
	/* non-zero first byte: the BIST sequence itself did not complete */
	if(byte_data[0] != 0)
		return CYTTSP_BIST_PROCESS;
	else if(byte_data[1] != 0xff)
	{
		/* second byte: one pass bit per sub-test; a clear bit marks
		 * the first failing test */
		if(!(byte_data[1] & 0x01))
			return CYTTSP_OPEN_TEST;
		else if(!(byte_data[1] & 0x02))
			return CYTTSP_SHORT_GND;
		else if(!(byte_data[1] & 0x04))
			return CYTTSP_SHORT_VDD;
		else if(!(byte_data[1] & 0x08))
			return CYTTSP_SHORT_PIN;
		else if(!(byte_data[1] & 0x10))
			return CYTTSP_LOCAL_IDAC;
		else if(!(byte_data[1] & 0x20))
			return CYTTSP_GLOBAL_IDAC;
		else if(!(byte_data[1] & 0x40))
			return CYTTSP_BASELINE_TEST;
		else if(!(byte_data[1] & 0x80))
			return CYTTSP_COMPLETE_BIT;
	}
	else
		return 0;
	return 0;
}
/*
 * init_hw_setting - one-time board power-up for the touch controller:
 * pulse the CHG line, enable the AVDD (3.0 V) and DVDD supplies, drive the
 * ID pin, and mark the driver state as powered on.
 * On any regulator error the function logs the failure, releases the
 * references it acquired, and returns without setting Touch_Status.
 */
static void init_hw_setting(void)
{
	int rc;
	struct regulator *vreg_touch, *vreg_power_1_8;

	gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_CHG, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
	gpio_set_value(GPIO_TOUCH_CHG, 0);
	/* Power On, AVDD */
	vreg_touch = regulator_get(NULL, "8058_l19");
	if (IS_ERR(vreg_touch)) {
		rc = PTR_ERR(vreg_touch);
		/* Fix: the original passed the (error) pointer itself as the
		 * %s argument and then kept using it; log the supply name
		 * and bail out instead of dereferencing an ERR_PTR. */
		printk(KERN_ERR "%s: regulator get of %s failed (%d)\n",
				__func__, "8058_l19", rc);
		return;
	}
	rc = regulator_set_voltage(vreg_touch, 3000000, 3000000);
	if (rc) {
		printk(KERN_ERR "%s: vreg set level failed (%d)\n", __func__, rc);
		regulator_put(vreg_touch);
		return;
	}
	rc = regulator_enable(vreg_touch);
	if (rc) {
		printk(KERN_ERR "%s: vreg enable failed (%d)\n",__func__, rc);
		regulator_put(vreg_touch);
		return;
	}
#if EF33S_BDVER_GE(WS20) || EF34K_BDVER_GE(WS20)
	/* DVDD: dedicated 1.9 V LDO on WS20 and later boards */
	vreg_power_1_8 = regulator_get(NULL, "8058_l11");
	if (IS_ERR(vreg_power_1_8)) {
		rc = PTR_ERR(vreg_power_1_8);
		printk(KERN_ERR "%s: regulator get of %s failed (%d)\n",
				__func__, "8058_l11", rc);
		regulator_put(vreg_touch);
		return;
	}
	rc = regulator_set_voltage(vreg_power_1_8, 1900000, 1900000);
	if (rc) {
		printk(KERN_ERR "%s: vreg set level failed (%d)\n", __func__, rc);
		goto err_put;
	}
	rc = regulator_enable(vreg_power_1_8);
	if (rc) {
		printk(KERN_ERR "%s: vreg enable failed (%d)\n",__func__, rc);
		goto err_put;
	}
#else
	/* Power On DVDD (voltage switch supply on older boards) */
	vreg_power_1_8 = regulator_get(NULL, "8058_lvs0");
	if (IS_ERR(vreg_power_1_8)) {
		rc = PTR_ERR(vreg_power_1_8);
		printk(KERN_ERR "%s: regulator get of %s failed (%d)\n",
				__func__, "8058_lvs0", rc);
		regulator_put(vreg_touch);
		return;
	}
	/* Fix: the original ignored this result */
	rc = regulator_enable(vreg_power_1_8);
	if (rc) {
		printk(KERN_ERR "%s: vreg enable failed (%d)\n",__func__, rc);
		goto err_put;
	}
#endif
	gpio_set_value(GPIO_TOUCH_CHG, 1);
	gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_CHG, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
	gpio_tlmm_config(GPIO_CFG(GPIO_TOUCH_ID, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE);
	gpio_set_value(GPIO_TOUCH_ID, 0);
	/* references were only needed for setup; the supplies stay enabled */
	regulator_put(vreg_touch);
	regulator_put(vreg_power_1_8);
	msleep(100);
	Touch_Status = TOUCH_POWERON;
	return;

err_put:
	regulator_put(vreg_power_1_8);
	regulator_put(vreg_touch);
}
/* I2C driver probe function */
/*
 * cyttsp_probe - I2C probe: allocate the driver context, power the
 * hardware, register the input device and IRQ, and schedule the optional
 * firmware-update work.
 * Returns CYTTSP_OPERATIONAL (0) on success or a negative errno.
 */
static int __devinit cyttsp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct cyttsp *ts;
	int error;
	int retval = CYTTSP_OPERATIONAL;
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
	int rc;
#endif
	cyttsp_info("Start Probe\n");
	/* allocate and clear memory */
	ts = kzalloc (sizeof(struct cyttsp),GFP_KERNEL);
	if (ts == NULL) {
		cyttsp_xdebug1("err kzalloc for cyttsp\n");
		/* Fix: the original continued with ts == NULL and later
		 * dereferenced it (mutex_init/ts_temp/schedule_work). */
		cyttsp_info("Start Probe %s\n", "FAIL");
		return -ENOMEM;
	}
	init_hw_setting();
	/* register driver_data */
	ts->client = client;
	ts->platform_data = client->dev.platform_data;
	i2c_set_clientdata(client,ts);
	ts->client->irq = IRQ_TOUCH_INT;
#ifdef CYTTSP_MUTEX_LOCK
	/* initialize before any code path (worker, firmware update) can
	 * take the lock */
	mutex_init(&ts->lock_mutex);
#endif
	error = cyttsp_initialize(client, ts);
	if (error) {
		cyttsp_xdebug1("err cyttsp_initialize\n");
		/* Fix: the original kfree()d ts here and then kept using the
		 * dangling pointer (ts_temp, mutex_init, schedule_work);
		 * clean up and return immediately instead. */
		i2c_set_clientdata(client, NULL);
		kfree(ts);
		cyttsp_info("Start Probe %s\n", "FAIL");
		return -ENODEV;
	}
	cyttsp_openlog();
#ifdef CONFIG_HAS_EARLYSUSPEND
	ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
	ts->early_suspend.suspend = cyttsp_early_suspend;
	ts->early_suspend.resume = cyttsp_late_resume;
	register_early_suspend(&ts->early_suspend);
#endif /* CONFIG_HAS_EARLYSUSPEND */
#ifdef FEATURE_SKY_PROCESS_CMD_KEY
	rc = misc_register(&touch_event);
	if (rc) {
		pr_err("::::::::: can''t register touch_fops\n");
	}
#endif
	cyttsp_info("Start Probe %s\n", "PASS");
	ts_temp = ts;
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
#if defined(CONFIG_APACHE_BOARD)
	schedule_work(&ts->work3);
#elif defined(CONFIG_EF33_BOARD) || defined(CONFIG_EF34_BOARD)
#if EF33S_BDVER_GE(WS20) || EF34K_BDVER_GE(WS20)
	/* firmware check deferred so it does not slow down boot */
	schedule_delayed_work(&ts->work3, msecs_to_jiffies(3000));
#endif
#else
	schedule_work(&ts->work3);
#endif
#endif
	wake_lock_init(&touch_wake_lock, WAKE_LOCK_SUSPEND, "touch");
	return retval;
}
/* Function to manage power-on resume */
static int cyttsp_resume(struct i2c_client *client)
{
struct cyttsp *ts;
int ret=0;
int retval = CYTTSP_OPERATIONAL;
dbg("Wake Up\n");
ts = (struct cyttsp *) i2c_get_clientdata(client);
if(ts == NULL)
return retval;
pantech_ctl_update(ISSP_IOCTL_POWER_ALL , 1);
pantech_ctl_update(ISSP_IOCTL_INTR, 1);
pantech_ctl_update(ISSP_COMPLITED_UPDATA, 0);
Touch_Status = TOUCH_POWERON;
// for ICS version - 0511 KJHW
input_mt_init_slots(ts->input, TOUCH_MAX_NUM);
msleep(100);
ret = request_irq (client->irq,cyttsp_irq,IRQF_TRIGGER_FALLING, client->dev.driver->name,ts);
#ifdef FEATURE_CYTTSP_HEARTBEAT
mod_timer(&ts->timer, jiffies + CYTTSP_HEARTBEAT_TIME * HZ);
start_heartbeat_timer = true;
#endif
/* re-enable the interrupt after resuming */
// enable_irq(ts->client->irq);
cyttsp_debug("Wake Up %s\n", (retval < CYTTSP_OPERATIONAL) ? "FAIL" : "PASS" );
return retval;
}
/* Function to manage low power suspend */
/*
 * cyttsp_suspend - low-power suspend: quiesce the IRQ and pending work,
 * power the controller down, park the bus lines, and report a release for
 * every MT slot so userspace does not see stale touches after resume.
 * Returns the result of the last cancel_work_sync() (original behavior).
 */
static int cyttsp_suspend(struct i2c_client *client, pm_message_t message)
{
	struct cyttsp *ts;
	int retval = CYTTSP_OPERATIONAL, id =0;
	dbg("Enter Sleep\n");
	ts = (struct cyttsp *) i2c_get_clientdata(client);
	if(ts == NULL)
		return retval;
	/* disable worker */
	disable_irq_nosync(ts->client->irq);
#ifdef FEATURE_CYTTSP_HEARTBEAT
	start_heartbeat_timer = false;
	retval = cancel_work_sync(&ts->work2);
	del_timer(&ts->timer);
#endif
#ifdef FEATURE_CYTTSP_FIRMWAREUPGRADE
//	cancel_work_sync(&ts->work3);
#endif
	Touch_Status = TOUCH_POWEROFF;
	retval = cancel_work_sync(&ts->work);
	/* power rails off, release the IRQ, then drive the bus lines low */
	pantech_ctl_update(ISSP_IOCTL_POWER_ALL, 0);
	free_irq(client->irq,ts);
	pantech_ctl_update(ISSP_IOCTL_SCLK_TO_GPIO, 1);
	pantech_ctl_update(ISSP_IOCTL_DATA_TO_GPIO, 1);
	pantech_ctl_update(ISSP_IOCTL_INTR, 0);
	pantech_ctl_update(ISSP_IOCTL_SCLK, 0);
	pantech_ctl_update(ISSP_IOCTL_DATA, 0);
	/* invalidate every tracking slot and emit a release event per slot */
	for (id = 0; id < TOUCH_MAX_NUM; id++)
	{
		cur_trk[id].abs[CY_ABS_ID_OST] = -1;
		cur_touchflag[id] = 0;
		input_mt_slot(ts->input, id);
		input_report_abs(ts->input, ABS_MT_TRACKING_ID, cur_trk[id].abs[CY_ABS_ID_OST]);
		input_report_key(ts->input, BTN_TOUCH, 0 );
		input_sync(ts->input);
	}
	/* slots are re-created in cyttsp_resume() */
	input_mt_destroy_slots(ts->input);
	return retval;
}
/* registered in driver struct */
/*
 * cyttsp_remove - driver teardown: remove the sysfs attribute, cancel
 * outstanding work, stop the polling timer or release the IRQ, and free
 * the per-device context.
 */
static int __devexit cyttsp_remove(struct i2c_client *client)
{
	struct cyttsp *ts;
	int err;
	cyttsp_alert("Unregister\n");
	/* clientdata registered on probe */
	ts = i2c_get_clientdata(client);
	device_remove_file(&ts->client->dev, &dev_attr_irq_enable);
	/* Start cleaning up by removing any delayed work and the timer */
	/* NOTE(review): ts->work is initialized with INIT_WORK (a plain
	 * work_struct); casting it to delayed_work here looks suspect --
	 * confirm cancel_work_sync() was intended. */
	if (cancel_delayed_work((struct delayed_work *)&ts->work)<0) {
		cyttsp_alert("error: could not remove work from workqueue\n");
	}
	/* free up timer or irq */
	if(ts->client->irq == 0) {
		err = del_timer(&ts->timer);
		if (err < 0) {
			cyttsp_alert("error: failed to delete timer\n");
		}
	}
	else {
#ifdef FEATURE_CYTTSP_HEARTBEAT
		start_heartbeat_timer = false;
		del_timer(&ts->timer);
#endif
		free_irq(client->irq,ts);
	}
	/* housekeeping */
	if (ts != NULL) {
		kfree(ts);
	}
	/* clientdata registered on probe */
	cyttsp_alert("Leaving\n");
	return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Early-suspend hook: forward to the regular suspend path with the i2c
 * client recovered from the embedded early_suspend handle. */
static void cyttsp_early_suspend(struct early_suspend *handler)
{
	struct cyttsp *ts = container_of(handler, struct cyttsp, early_suspend);

	cyttsp_suspend(ts->client, PMSG_SUSPEND);
}
/* Late-resume hook: forward to the regular resume path with the i2c
 * client recovered from the embedded early_suspend handle. */
static void cyttsp_late_resume(struct early_suspend *handler)
{
	struct cyttsp *ts = container_of(handler, struct cyttsp, early_suspend);

	cyttsp_resume(ts->client);
}
#endif /* CONFIG_HAS_EARLYSUSPEND */
/* Module init: announce the driver and register it with the i2c core. */
static int cyttsp_init(void)
{
	cyttsp_info("Cypress TrueTouch(R) Standard Product I2C Touchscreen Driver (Built %s @ %s)\n",
		__DATE__, __TIME__);
	return i2c_add_driver(&cyttsp_driver);
}
/* Module exit: unregister the driver from the i2c core. */
static void cyttsp_exit(void)
{
	/* Fix: the original wrote "return i2c_del_driver(...);" -- returning
	 * an expression from a void function is an ISO C constraint
	 * violation; just make the call. */
	i2c_del_driver(&cyttsp_driver);
}
module_init(cyttsp_init);
module_exit(cyttsp_exit);
| cmvega/cmvega | drivers/input/touchscreen/cyttsp-i2c.c | C | gpl-2.0 | 80,215 |
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acresrc.h"
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsutils")
/*
 * acpi_rs_decode_bitmask - expand a bitmask into a list of bit positions.
 * Each set bit's position is appended to list[]; returns the number of
 * positions written.
 */
u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
{
	u8 bit;
	u8 count = 0;

	ACPI_FUNCTION_ENTRY();

	/* Scan the mask from LSB to MSB, recording each set bit position */

	for (bit = 0; mask; bit++, mask >>= 1) {
		if (mask & 0x0001) {
			list[count] = bit;
			count++;
		}
	}

	return (count);
}
/*
 * acpi_rs_encode_bitmask - inverse of acpi_rs_decode_bitmask: build a
 * bitmask with one bit set for each position in list[0..count-1].
 */
u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
{
	u32 index;
	u16 mask = 0;

	ACPI_FUNCTION_ENTRY();

	/* Set one mask bit per list entry */

	for (index = 0; index < count; index++) {
		mask |= (0x1 << list[index]);
	}

	return mask;
}
/*
 * acpi_rs_move_data - copy item_count items between an AML byte stream
 * and an internal resource descriptor.  8-bit data moves in one memcpy;
 * 16/32/64-bit items move one at a time through the ACPI_MOVE_* macros,
 * which handle endian conversion and misalignment-intolerant hardware.
 * Unknown move_type values are silently ignored.
 */
void
acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
{
	u32 i;
	ACPI_FUNCTION_ENTRY();
	/* One move per item */
	for (i = 0; i < item_count; i++) {
		switch (move_type) {
			/*
			 * For the 8-bit case, we can perform the move all at once
			 * since there are no alignment or endian issues
			 */
		case ACPI_RSC_MOVE8:
			ACPI_MEMCPY(destination, source, item_count);
			return;
			/*
			 * 16-, 32-, and 64-bit cases must use the move macros that perform
			 * endian conversion and/or accomodate hardware that cannot perform
			 * misaligned memory transfers
			 */
		case ACPI_RSC_MOVE16:
			ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
					   &ACPI_CAST_PTR(u16, source)[i]);
			break;
		case ACPI_RSC_MOVE32:
			ACPI_MOVE_32_TO_32(&ACPI_CAST_PTR(u32, destination)[i],
					   &ACPI_CAST_PTR(u32, source)[i]);
			break;
		case ACPI_RSC_MOVE64:
			ACPI_MOVE_64_TO_64(&ACPI_CAST_PTR(u64, destination)[i],
					   &ACPI_CAST_PTR(u64, source)[i]);
			break;
		default:
			return;
		}
	}
}
/*
 * acpi_rs_set_resource_length - store the descriptor length (total length
 * minus the header size) into an AML descriptor.  Large descriptors keep
 * a 16-bit length in bytes 1-2; small descriptors keep a 3-bit length in
 * bits 2:0 of the descriptor-type byte.
 */
void
acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
			    union aml_resource *aml)
{
	acpi_rs_length resource_length;
	ACPI_FUNCTION_ENTRY();
	/* Length is the total descriptor length minus the header length */
	resource_length = (acpi_rs_length)
	    (total_length - acpi_ut_get_resource_header_length(aml));
	/* Length is stored differently for large and small descriptors */
	if (aml->small_header.descriptor_type & ACPI_RESOURCE_NAME_LARGE) {
		/* Large descriptor -- bytes 1-2 contain the 16-bit length */
		ACPI_MOVE_16_TO_16(&aml->large_header.resource_length,
				   &resource_length);
	} else {
		/* Small descriptor -- bits 2:0 of byte 0 contain the length */
		aml->small_header.descriptor_type = (u8)
		    /* Clear any existing length, preserving descriptor type bits */
		    ((aml->small_header.
		      descriptor_type & ~ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK)
		     | resource_length);
	}
}
/*
 * acpi_rs_set_resource_header - initialize the descriptor-type byte and
 * the resource-length field of a new AML resource descriptor.
 */
void
acpi_rs_set_resource_header(u8 descriptor_type,
			    acpi_rsdesc_size total_length,
			    union aml_resource *aml)
{
	ACPI_FUNCTION_ENTRY();
	/* Set the Resource Type */
	aml->small_header.descriptor_type = descriptor_type;
	/* Set the Resource Length */
	acpi_rs_set_resource_length(total_length, aml);
}
/*
 * acpi_rs_strcpy - local string copy; unlike libc strcpy it returns the
 * number of bytes copied, including the NULL terminator.
 */
static u16 acpi_rs_strcpy(char *destination, char *source)
{
	u16 length = 0;

	ACPI_FUNCTION_ENTRY();

	/* Copy every character up to (but excluding) the terminator */

	while (source[length]) {
		destination[length] = source[length];
		length++;
	}
	destination[length] = 0;

	/* Return string length including the NULL terminator */

	return ((u16) (length + 1));
}
/*
 * acpi_rs_get_resource_source - copy an optional resource_source (index
 * byte plus NULL-terminated string) out of an AML descriptor into the
 * internal resource structure.  Returns the word-rounded string length
 * consumed, or 0 when the descriptor carries no resource_source.
 */
acpi_rs_length
acpi_rs_get_resource_source(acpi_rs_length resource_length,
			    acpi_rs_length minimum_length,
			    struct acpi_resource_source * resource_source,
			    union aml_resource * aml, char *string_ptr)
{
	acpi_rsdesc_size total_length;
	u8 *aml_resource_source;
	ACPI_FUNCTION_ENTRY();
	total_length =
	    resource_length + sizeof(struct aml_resource_large_header);
	aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
	/*
	 * resource_source is present if the length of the descriptor is longer than
	 * the minimum length.
	 *
	 * Note: Some resource descriptors will have an additional null, so
	 * we add 1 to the minimum length.
	 */
	if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
		/* Get the resource_source_index */
		resource_source->index = aml_resource_source[0];
		resource_source->string_ptr = string_ptr;
		if (!string_ptr) {
			/*
			 * String destination pointer is not specified; Set the String
			 * pointer to the end of the current resource_source structure.
			 */
			resource_source->string_ptr =
			    ACPI_ADD_PTR(char, resource_source,
					 sizeof(struct acpi_resource_source));
		}
		/*
		 * In order for the Resource length to be a multiple of the native
		 * word, calculate the length of the string (+1 for NULL terminator)
		 * and expand to the next word multiple.
		 *
		 * Zero the entire area of the buffer.
		 */
		total_length = (u32)
		    ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
		total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
		ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
		/* Copy the resource_source string to the destination */
		resource_source->string_length =
		    acpi_rs_strcpy(resource_source->string_ptr,
				   ACPI_CAST_PTR(char,
						 &aml_resource_source[1]));
		return ((acpi_rs_length) total_length);
	}
	/* resource_source is not present */
	resource_source->index = 0;
	resource_source->string_length = 0;
	resource_source->string_ptr = NULL;
	return (0);
}
/*
 * acpi_rs_set_resource_source - append an optional resource_source (index
 * byte plus NULL-terminated string) to an AML descriptor.  Returns the
 * new total descriptor length; when no resource_source is present
 * (string_length == 0) the minimum length is returned unchanged.
 */
acpi_rsdesc_size
acpi_rs_set_resource_source(union aml_resource * aml,
			    acpi_rs_length minimum_length,
			    struct acpi_resource_source * resource_source)
{
	u8 *aml_resource_source;
	acpi_rsdesc_size descriptor_length;
	ACPI_FUNCTION_ENTRY();
	descriptor_length = minimum_length;
	/* Non-zero string length indicates presence of a resource_source */
	if (resource_source->string_length) {
		/* Point to the end of the AML descriptor */
		aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
		/* Copy the resource_source_index */
		aml_resource_source[0] = (u8) resource_source->index;
		/* Copy the resource_source string */
		ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
			    resource_source->string_ptr);
		/*
		 * Add the length of the string (+ 1 for null terminator) to the
		 * final descriptor length
		 */
		descriptor_length +=
		    ((acpi_rsdesc_size) resource_source->string_length + 1);
	}
	/* Return the new total length of the AML descriptor */
	return (descriptor_length);
}
/*
 * acpi_rs_get_prt_method_data - evaluate _PRT on the given namespace node
 * and convert the returned package into a PCI interrupt routing table in
 * ret_buffer.  Returns the ACPI status of evaluation or conversion.
 */
acpi_status
acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
			    struct acpi_buffer * ret_buffer)
{
	union acpi_operand_object *obj_desc;
	acpi_status status;
	ACPI_FUNCTION_TRACE(rs_get_prt_method_data);
	/* Parameters guaranteed valid by caller */
	/* Execute the method, no parameters */
	status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT,
					 ACPI_BTYPE_PACKAGE, &obj_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	/*
	 * Create a resource linked list from the byte stream buffer that comes
	 * back from the _CRS method execution.
	 */
	status = acpi_rs_create_pci_routing_table(obj_desc, ret_buffer);
	/* On exit, we must delete the object returned by evaluate_object */
	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}
/*
 * Evaluate the _CRS method on the given device node and convert the
 * returned AML byte stream into a linked list of acpi_resource
 * structures placed in ret_buffer.
 */
acpi_status
acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
			    struct acpi_buffer *ret_buffer)
{
	union acpi_operand_object *crs_buffer;
	acpi_status status;

	ACPI_FUNCTION_TRACE(rs_get_crs_method_data);

	/* Parameters guaranteed valid by caller */

	/* Evaluate _CRS (no arguments); a Buffer object is required */

	status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS,
					 ACPI_BTYPE_BUFFER, &crs_buffer);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert the AML byte stream returned by _CRS to a resource list */

	status = acpi_rs_create_resource_list(crs_buffer, ret_buffer);

	/* The object returned by evaluate_object must always be released */

	acpi_ut_remove_reference(crs_buffer);
	return_ACPI_STATUS(status);
}
#ifdef ACPI_FUTURE_USAGE
/*
 * Evaluate the _PRS method on the given device node and convert the
 * returned AML byte stream into a linked list of acpi_resource
 * structures placed in ret_buffer.
 */
acpi_status
acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
			    struct acpi_buffer *ret_buffer)
{
	union acpi_operand_object *prs_buffer;
	acpi_status status;

	ACPI_FUNCTION_TRACE(rs_get_prs_method_data);

	/* Parameters guaranteed valid by caller */

	/* Evaluate _PRS (no arguments); a Buffer object is required */

	status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS,
					 ACPI_BTYPE_BUFFER, &prs_buffer);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert the AML byte stream returned by _PRS to a resource list */

	status = acpi_rs_create_resource_list(prs_buffer, ret_buffer);

	/* The object returned by evaluate_object must always be released */

	acpi_ut_remove_reference(prs_buffer);
	return_ACPI_STATUS(status);
}
#endif /* ACPI_FUTURE_USAGE */
/*
 * Evaluate an arbitrary resource method (named by 'path', relative to
 * 'handle') that returns an AML resource byte stream, and convert that
 * stream into a linked list of acpi_resource structures in ret_buffer.
 */
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
			char *path, struct acpi_buffer *ret_buffer)
{
	union acpi_operand_object *method_buffer;
	acpi_status status;

	ACPI_FUNCTION_TRACE(rs_get_method_data);

	/* Parameters guaranteed valid by caller */

	/* Evaluate the method (no arguments); a Buffer object is required */

	status =
	    acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER,
				    &method_buffer);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert the returned AML byte stream to a resource list */

	status = acpi_rs_create_resource_list(method_buffer, ret_buffer);

	/* The object returned by evaluate_object must always be released */

	acpi_ut_remove_reference(method_buffer);
	return_ACPI_STATUS(status);
}
/*
 * acpi_rs_set_srs_method_data - execute _SRS on a device node
 *
 * node       - device whose _SRS method is to be run
 * in_buffer  - linked list of acpi_resource structures to be converted
 *              to an AML byte stream and passed as the single _SRS argument
 *
 * Returns the status from acpi_ns_evaluate (or an earlier allocation/
 * conversion failure). The return value of _SRS itself is ignored
 * (ACPI_IGNORE_RETURN_VALUE).
 *
 * Ownership note: the AML buffer created here is handed off to the
 * args[0] buffer object, whose acpi_ut_remove_reference below frees it;
 * it is freed directly only on the args[0] allocation-failure path.
 */
acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
			    struct acpi_buffer *in_buffer)
{
	struct acpi_evaluate_info *info;
	union acpi_operand_object *args[2];	/* one buffer argument + NULL terminator */
	acpi_status status;
	struct acpi_buffer buffer;	/* receives the converted AML byte stream */
	ACPI_FUNCTION_TRACE(rs_set_srs_method_data);
	/* Allocate and initialize the evaluation information block */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}
	info->prefix_node = node;
	info->pathname = METHOD_NAME__SRS;
	info->parameters = args;
	info->flags = ACPI_IGNORE_RETURN_VALUE;
	/*
	 * The in_buffer parameter will point to a linked list of
	 * resource parameters. It needs to be formatted into a
	 * byte stream to be sent in as an input parameter to _SRS
	 *
	 * Convert the linked list into a byte stream
	 */
	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;	/* let the converter allocate */
	status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}
	/* Create and initialize the method parameter object */
	args[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
	if (!args[0]) {
		/*
		 * Must free the buffer allocated above (otherwise it is freed
		 * later)
		 */
		ACPI_FREE(buffer.pointer);
		status = AE_NO_MEMORY;
		goto cleanup;
	}
	/* Hand the AML stream to the buffer object; it now owns the memory */
	args[0]->buffer.length = (u32) buffer.length;
	args[0]->buffer.pointer = buffer.pointer;
	args[0]->common.flags = AOPOBJ_DATA_VALID;
	args[1] = NULL;	/* terminate the argument list */
	/* Execute the method, no return value is expected */
	status = acpi_ns_evaluate(info);
	/* Clean up and return the status from acpi_ns_evaluate */
	/* Dropping the reference also frees the AML buffer owned by args[0] */
	acpi_ut_remove_reference(args[0]);
cleanup:
	ACPI_FREE(info);
	return_ACPI_STATUS(status);
}
| luckasfb/OT_903D-kernel-2.6.35.7 | kernel/drivers/acpi/acpica/rsutils.c | C | gpl-2.0 | 11,290 |