
Commit 16a54ce

htable: handle blob/text columns when loading from database
- reported by Patrick Ginhoux

(cherry picked from commit 24e7296)
miconda committed Apr 4, 2017
1 parent 1ed8f8a commit 16a54ce
Showing 1 changed file with 35 additions and 3 deletions.
38 changes: 35 additions & 3 deletions src/modules/htable/ht_db.c
@@ -114,7 +114,8 @@ static char ht_name_buf[HT_NAME_BUF_SIZE];
 static int ht_pack_values(ht_t *ht, db1_res_t* db_res,
 		int row, int cols, str *hvalue)
 {
-	static char vbuf[4096];
+#define HTABLE_PACK_BUF_SIZE 4096
+	static char vbuf[HTABLE_PACK_BUF_SIZE];
 	int c;
 	int len;
 	char *p;
@@ -128,15 +129,18 @@ static int ht_pack_values(ht_t *ht, db1_res_t* db_res,
 			len += strlen(RES_ROWS(db_res)[row].values[c].val.string_val);
 		} else if(RES_ROWS(db_res)[row].values[c].type == DB1_STR) {
 			len += RES_ROWS(db_res)[row].values[c].val.str_val.len;
+		} else if(RES_ROWS(db_res)[row].values[c].type == DB1_BLOB) {
+			len += RES_ROWS(db_res)[row].values[c].val.blob_val.len;
 		} else if(RES_ROWS(db_res)[row].values[c].type == DB1_INT) {
 			len += 12;
 		} else {
 			LM_ERR("unsupported data type for column %d\n", c);
 			return -1;
 		}
 	}
-	if(len + c>=4096) {
-		LM_ERR("too large values (need %d)\n", len+c);
+	if(len + c>=HTABLE_PACK_BUF_SIZE) {
+		LM_ERR("too large values (need %d, have %d)\n", len+c,
+				HTABLE_PACK_BUF_SIZE);
 		return -1;
 	}
 	p = vbuf;
@@ -151,6 +155,10 @@ static int ht_pack_values(ht_t *ht, db1_res_t* db_res,
 			strncpy(p, RES_ROWS(db_res)[row].values[c].val.str_val.s,
 					RES_ROWS(db_res)[row].values[c].val.str_val.len);
 			p += RES_ROWS(db_res)[row].values[c].val.str_val.len;
+		} else if(RES_ROWS(db_res)[row].values[c].type == DB1_BLOB) {
+			memcpy(p, RES_ROWS(db_res)[row].values[c].val.blob_val.s,
+					RES_ROWS(db_res)[row].values[c].val.blob_val.len);
+			p += RES_ROWS(db_res)[row].values[c].val.blob_val.len;
 		} else if(RES_ROWS(db_res)[row].values[c].type == DB1_INT) {
 			iv.s = sint2str(RES_ROWS(db_res)[row].values[c].val.int_val, &iv.len);
 			strncpy(p, iv.s, iv.len);
@@ -280,6 +288,14 @@ int ht_db_load_table(ht_t *ht, str *dbtable, int mode)
 				}
 				kname.len = (RES_ROWS(db_res)[i].values[0].val.str_val.len);
 				break;
+			case DB1_BLOB:
+				kname.s = (RES_ROWS(db_res)[i].values[0].val.blob_val.s);
+				if(kname.s==NULL) {
+					LM_ERR("null key in row %d\n", i);
+					goto error;
+				}
+				kname.len = (RES_ROWS(db_res)[i].values[0].val.blob_val.len);
+				break;
 			case DB1_STRING:
 				kname.s = (char*)(RES_ROWS(db_res)[i].values[0].val.string_val);
 				if(kname.s==NULL) {
@@ -392,6 +408,14 @@ int ht_db_load_table(ht_t *ht, str *dbtable, int mode)
 				}
 				str2sint(&kvalue, &val.n);
 				break;
+			case DB1_BLOB:
+				kvalue = RES_ROWS(db_res)[i].values[3].val.blob_val;
+				if(kvalue.s==NULL) {
+					LM_ERR("null value in row %d\n", i);
+					goto error;
+				}
+				str2sint(&kvalue, &val.n);
+				break;
 			case DB1_STRING:
 				kvalue.s = (char*)(RES_ROWS(db_res)[i].values[3].val.string_val);
 				if(kvalue.s==NULL) {
@@ -423,6 +447,14 @@ int ht_db_load_table(ht_t *ht, str *dbtable, int mode)
 				}
 				val.s = kvalue;
 				break;
+			case DB1_BLOB:
+				kvalue = RES_ROWS(db_res)[i].values[3].val.blob_val;
+				if(kvalue.s==NULL) {
+					LM_ERR("null value in row %d\n", i);
+					goto error;
+				}
+				val.s = kvalue;
+				break;
 			case DB1_STRING:
 				kvalue.s = (char*)(RES_ROWS(db_res)[i].values[3].val.string_val);
 				if(kvalue.s==NULL) {
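
Side note on the pattern: in the srdb1 result API a DB1_BLOB value is carried the same way as a DB1_STR value, as a str holding a pointer and a length (val.blob_val.s / val.blob_val.len), which is why each new branch above mirrors the existing DB1_STR handling; only DB1_STRING differs, holding a NUL-terminated C string whose length has to be computed with strlen(), as the pre-existing code already does. A minimal sketch of that access pattern is below; the helper name ht_col_as_str and the include paths are assumptions for illustration, not code from this commit.

/* Minimal sketch (hypothetical helper, not from the commit): read one result
 * column as a str, treating DB1_BLOB like DB1_STR since both expose the same
 * {char *s; int len} pair in the db_val_t union. */
#include <string.h>

#include "../../core/str.h"       /* assumed include paths for the 5.x tree */
#include "../../lib/srdb1/db.h"

static int ht_col_as_str(db1_res_t *db_res, int row, int col, str *out)
{
	db_val_t *v = &RES_ROWS(db_res)[row].values[col];

	switch(v->type) {
		case DB1_STR:
			*out = v->val.str_val;              /* pointer + length, no copy */
			break;
		case DB1_BLOB:
			*out = v->val.blob_val;             /* same s/len layout as DB1_STR */
			break;
		case DB1_STRING:
			out->s = (char*)v->val.string_val;  /* NUL-terminated C string */
			if(out->s==NULL)
				return -1;
			out->len = strlen(out->s);
			break;
		default:
			return -1;                          /* unsupported column type */
	}
	if(out->s==NULL)
		return -1;
	return 0;
}

With values exposed this way, packing in ht_pack_values only has to sum the column lengths up front, check them against the bounded vbuf, and copy the bytes, as the hunks above do.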
