command: Fix encoding of 16bit signed integers

The code wasn't properly sign-extending 16bit integers, which caused
int16_t parameters passed to output() to be encoded as if they were
uint16_t.

Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
This commit is contained in: parent 9702d522a4, commit f28eb902df
```diff
@@ -139,15 +139,17 @@ _sendf(uint8_t parserid, ...)
         param_types++;
         uint32_t v;
         switch (t) {
+        case PT_byte:
+        case PT_uint16:
+            v = va_arg(args, unsigned int);
+            goto encode_int;
+        case PT_int16:
+            v = (int32_t)va_arg(args, int);
+            goto encode_int;
         case PT_uint32:
         case PT_int32:
-        case PT_uint16:
-        case PT_int16:
-        case PT_byte:
-            if (t >= PT_uint16)
-                v = va_arg(args, int) & 0xffff;
-            else
-                v = va_arg(args, uint32_t);
+            v = va_arg(args, uint32_t);
+        encode_int:
             p = encode_int(p, v);
             break;
         case PT_string: {
```
|
Loading…
Reference in New Issue