func (string) | target (string) | cwe (sequence) | project (string) | commit_id (string) | hash (string) | size (int64) | message (string) | vul (int64) |
---|---|---|---|---|---|---|---|---|
static void mce_start_timer(struct timer_list *t)
{
unsigned long iv = check_interval * HZ;
if (mca_cfg.ignore_ce || !iv)
return;
this_cpu_write(mce_next_interval, iv);
__start_timer(t, iv);
} | Safe | [
"CWE-362"
] | linux | b3b7c4795ccab5be71f080774c45bbbcc75c2aaf | 2.2844591328517006e+38 | 10 | x86/MCE: Serialize sysfs changes
The check_interval file in
/sys/devices/system/machinecheck/machinecheck<cpu number>
directory is a global timer value for MCE polling. If it is changed by one
CPU, mce_restart() broadcasts the event to other CPUs to delete and restart
the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the
mce_timer variable.
If more than one CPU writes a specific value to the check_interval file
concurrently, mce_timer is not protected from such concurrent accesses and
all kinds of explosions happen. Since only root can write to those sysfs
variables, the issue is not a big deal security-wise.
However, concurrent writes to these configuration variables is void of
reason so the proper thing to do is to serialize the access with a mutex.
Boris:
- Make store_int_with_restart() use device_store_ulong() to filter out
negative intervals
- Limit min interval to 1 second
- Correct locking
- Massage commit message
Signed-off-by: Seunghun Han <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: linux-edac <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected] | 0 |
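The commit message above boils down to one pattern: funnel every writer of the shared interval through a single lock and clamp the minimum value. A minimal user-space sketch of that pattern (pthread mutex, hypothetical names; not the kernel code itself):

```c
#include <pthread.h>

static pthread_mutex_t interval_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long check_interval = 300;      /* polling interval, seconds */

/* Serialized store: negative values are rejected, the minimum is 1s,
 * and the update plus the timer restart happen under one mutex. */
int set_check_interval(long new_interval)
{
    if (new_interval < 0)
        return -1;
    if (new_interval < 1)
        new_interval = 1;

    pthread_mutex_lock(&interval_lock);
    check_interval = (unsigned long)new_interval;
    /* restart of the per-CPU timers would happen here, still locked */
    pthread_mutex_unlock(&interval_lock);
    return 0;
}
```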
static size_t PCLDeltaCompressImage(const size_t length,
const unsigned char *previous_pixels,const unsigned char *pixels,
unsigned char *compress_pixels)
{
int
delta,
j,
replacement;
register ssize_t
i,
x;
register unsigned char
*q;
q=compress_pixels;
for (x=0; x < (ssize_t) length; )
{
j=0;
for (i=0; x < (ssize_t) length; x++)
{
if (*pixels++ != *previous_pixels++)
{
i=1;
break;
}
j++;
}
while (x < (ssize_t) length)
{
x++;
if (*pixels == *previous_pixels)
break;
i++;
previous_pixels++;
pixels++;
}
if (i == 0)
break;
replacement=j >= 31 ? 31 : j;
j-=replacement;
delta=i >= 8 ? 8 : i;
*q++=(unsigned char) (((delta-1) << 5) | replacement);
if (replacement == 31)
{
for (replacement=255; j != 0; )
{
if (replacement > j)
replacement=j;
*q++=(unsigned char) replacement;
j-=replacement;
}
if (replacement == 255)
*q++='\0';
}
for (pixels-=i; i != 0; )
{
for (i-=delta; delta != 0; delta--)
*q++=(*pixels++);
if (i == 0)
break;
delta=(int) i;
if (i >= 8)
delta=8;
*q++=(unsigned char) ((delta-1) << 5);
}
}
return((size_t) (q-compress_pixels));
} | Safe | [
"CWE-401"
] | ImageMagick6 | ff840181f631b1b7f29160cae24d792fcd176bae | 2.684364704311009e+37 | 70 | https://github.com/ImageMagick/ImageMagick/issues/1520 | 0 |
static void i_defer_frees(gs_memory_t *mem, int defer)
{
} | Safe | [
"CWE-190"
] | ghostpdl | cfde94be1d4286bc47633c6e6eaf4e659bd78066 | 9.554290570389214e+36 | 3 | Bug 697985: bounds check the array allocations methods
The clump allocator has four allocation functions that use 'number of elements'
and 'size of elements' parameters (rather than a simple 'number of bytes').
Those need specific bounds checking. | 0 |
PJ_DEF(pj_status_t) pj_file_flush(pj_oshandle_t fd)
{
int rc;
rc = fflush((FILE*)fd);
if (rc == EOF) {
return PJ_RETURN_OS_ERROR(errno);
}
return PJ_SUCCESS;
} | Safe | [
"CWE-703",
"CWE-835"
] | pjproject | 947bc1ee6d05be10204b918df75a503415fd3213 | 2.629605683675584e+38 | 11 | Merge pull request from GHSA-rwgw-vwxg-q799
* Prevent potential infinite loop when parsing WAV format file
* Check if subchunk is negative.
* Fix and add checks
* Change data type from pj_ssize_t to long.
* Modify check
* Fix leak file descriptor and modify check on wav_playlist
* Move overflow/underflow check to pj_file_setpos()
* Use macro to simplify check
* modification based on comments
* Remove unnecessary casting
* Modification based on comments | 0 |
static void motor_off_callback(struct timer_list *t)
{
unsigned long nr = t - motor_off_timer;
unsigned char mask = ~(0x10 << UNIT(nr));
if (WARN_ON_ONCE(nr >= N_DRIVE))
return;
set_dor(FDC(nr), mask, 0);
} | Safe | [
"CWE-190",
"CWE-125"
] | linux | da99466ac243f15fbba65bd261bfc75ffa1532b6 | 1.497697247273528e+38 | 10 | floppy: fix out-of-bounds read in copy_buffer
This fixes a global out-of-bounds read access in the copy_buffer
function of the floppy driver.
The FDDEFPRM ioctl allows one to set the geometry of a disk. The sect
and head fields (unsigned int) of the floppy_drive structure are used to
compute the max_sector (int) in the make_raw_rw_request function. It is
possible to overflow the max_sector. Next, max_sector is passed to the
copy_buffer function and used in one of the memcpy calls.
An unprivileged user could trigger the bug if the device is accessible,
but requires a floppy disk to be inserted.
The patch adds the check for the .sect * .head multiplication for not
overflowing in the set_geometry function.
The bug was found by syzkaller.
Signed-off-by: Denis Efremov <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> | 0 |
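The overflow check named in the message ("check for the .sect * .head multiplication") can be sketched generically; the helper below is an assumption about its shape, not the actual set_geometry() hunk:

```c
#include <limits.h>

/* Return 1 if sect * head fits in a non-negative int, 0 otherwise. */
int geometry_product_ok(unsigned int sect, unsigned int head)
{
    if (sect == 0 || head == 0)
        return 1;                       /* nothing to overflow */
    return sect <= (unsigned int)INT_MAX / head;
}
```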
R_API int r_sys_getpid() {
#if __UNIX__
return getpid ();
#elif __WINDOWS__
return GetCurrentProcessId();
#else
#warning r_sys_getpid not implemented for this platform
return -1;
#endif
} | Safe | [
"CWE-78"
] | radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | 7.756664957401996e+37 | 10 | Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments | 0 |
int PKCS7_signatureVerify(BIO *bio, PKCS7 *p7, PKCS7_SIGNER_INFO *si,
X509 *x509)
{
ASN1_OCTET_STRING *os;
EVP_MD_CTX *mdc_tmp, *mdc;
int ret = 0, i;
int md_type;
STACK_OF(X509_ATTRIBUTE) *sk;
BIO *btmp;
EVP_PKEY *pkey;
mdc_tmp = EVP_MD_CTX_new();
if (mdc_tmp == NULL) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY, ERR_R_MALLOC_FAILURE);
goto err;
}
if (!PKCS7_type_is_signed(p7) && !PKCS7_type_is_signedAndEnveloped(p7)) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY, PKCS7_R_WRONG_PKCS7_TYPE);
goto err;
}
md_type = OBJ_obj2nid(si->digest_alg->algorithm);
btmp = bio;
for (;;) {
if ((btmp == NULL) ||
((btmp = BIO_find_type(btmp, BIO_TYPE_MD)) == NULL)) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY,
PKCS7_R_UNABLE_TO_FIND_MESSAGE_DIGEST);
goto err;
}
BIO_get_md_ctx(btmp, &mdc);
if (mdc == NULL) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY, ERR_R_INTERNAL_ERROR);
goto err;
}
if (EVP_MD_CTX_type(mdc) == md_type)
break;
/*
* Workaround for some broken clients that put the signature OID
* instead of the digest OID in digest_alg->algorithm
*/
if (EVP_MD_pkey_type(EVP_MD_CTX_md(mdc)) == md_type)
break;
btmp = BIO_next(btmp);
}
/*
* mdc is the digest ctx that we want, unless there are attributes, in
* which case the digest is the signed attributes
*/
if (!EVP_MD_CTX_copy_ex(mdc_tmp, mdc))
goto err;
sk = si->auth_attr;
if ((sk != NULL) && (sk_X509_ATTRIBUTE_num(sk) != 0)) {
unsigned char md_dat[EVP_MAX_MD_SIZE], *abuf = NULL;
unsigned int md_len;
int alen;
ASN1_OCTET_STRING *message_digest;
if (!EVP_DigestFinal_ex(mdc_tmp, md_dat, &md_len))
goto err;
message_digest = PKCS7_digest_from_attributes(sk);
if (!message_digest) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY,
PKCS7_R_UNABLE_TO_FIND_MESSAGE_DIGEST);
goto err;
}
if ((message_digest->length != (int)md_len) ||
(memcmp(message_digest->data, md_dat, md_len))) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY, PKCS7_R_DIGEST_FAILURE);
ret = -1;
goto err;
}
if (!EVP_VerifyInit_ex(mdc_tmp, EVP_get_digestbynid(md_type), NULL))
goto err;
alen = ASN1_item_i2d((ASN1_VALUE *)sk, &abuf,
ASN1_ITEM_rptr(PKCS7_ATTR_VERIFY));
if (alen <= 0) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY, ERR_R_ASN1_LIB);
ret = -1;
goto err;
}
if (!EVP_VerifyUpdate(mdc_tmp, abuf, alen))
goto err;
OPENSSL_free(abuf);
}
os = si->enc_digest;
pkey = X509_get0_pubkey(x509);
if (!pkey) {
ret = -1;
goto err;
}
i = EVP_VerifyFinal(mdc_tmp, os->data, os->length, pkey);
if (i <= 0) {
PKCS7err(PKCS7_F_PKCS7_SIGNATUREVERIFY, PKCS7_R_SIGNATURE_FAILURE);
ret = -1;
goto err;
}
ret = 1;
err:
EVP_MD_CTX_free(mdc_tmp);
return (ret);
} | Safe | [
"CWE-327"
] | openssl | 631f94db0065c78181ca9ba5546ebc8bb3884b97 | 2.5900730858767144e+38 | 111 | Fix a padding oracle in PKCS7_dataDecode and CMS_decrypt_set1_pkey
An attack is simple, if the first CMS_recipientInfo is valid but the
second CMS_recipientInfo is chosen ciphertext. If the second
recipientInfo decodes to PKCS #1 v1.5 form plaintext, the correct
encryption key will be replaced by garbage, and the message cannot be
decoded, but if the RSA decryption fails, the correct encryption key is
used and the recipient will not notice the attack.
As a work around for this potential attack the length of the decrypted
key must be equal to the cipher default key length, in case the
certifiate is not given and all recipientInfo are tried out.
The old behaviour can be re-enabled in the CMS code by setting the
CMS_DEBUG_DECRYPT flag.
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9777)
(cherry picked from commit 5840ed0cd1e6487d247efbc1a04136a41d7b3a37) | 0 |
static int sqlite_callback(void *data, int argc,
char **argv, char **azColName) {
struct ntopng_sqlite_state *s = (struct ntopng_sqlite_state*)data;
lua_newtable(s->vm);
for(int i=0; i<argc; i++)
lua_push_str_table_entry(s->vm, (const char*)azColName[i],
(char*)(argv[i] ? argv[i] : "NULL"));
lua_pushinteger(s->vm, ++s->num_rows);
lua_insert(s->vm, -2);
lua_settable(s->vm, -3);
return(0);
} | Safe | [
"CWE-254"
] | ntopng | 2e0620be3410f5e22c9aa47e261bc5a12be692c6 | 3.700217330345067e+37 | 16 | Added security fix to avoid escalating privileges to non-privileged users
Many thanks to Dolev Farhi for reporting it | 0 |
static int cmd_ls(void *data, const char *input) { // "ls"
RCore *core = (RCore *)data;
const char *arg = strchr (input, ' ');
if (arg) {
arg = r_str_trim_ro (arg + 1);
}
switch (*input) {
case '?': // "l?"
eprintf ("Usage: l[es] # ls to list files, le[ss] to less a file\n");
break;
case 'e': // "le"
if (arg) {
r_core_cmdf (core, "cat %s~..", arg);
} else {
eprintf ("Usage: less [file]\n");
}
break;
default: // "ls"
if (!arg) {
arg = "";
}
if (r_fs_check (core->fs, arg)) {
r_core_cmdf (core, "md %s", arg);
} else {
char *res = r_syscmd_ls (arg);
if (res) {
r_cons_print (res);
free (res);
}
}
break;
}
return 0;
} | Safe | [
"CWE-78"
] | radare2 | dd739f5a45b3af3d1f65f00fe19af1dbfec7aea7 | 1.6129905062399439e+38 | 34 | Fix #14990 - multiple quoted command parsing issue ##core
> "?e hello""?e world"
hello
world"
> "?e hello";"?e world"
hello
world | 0 |
static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
struct cftype *cft)
{
return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
} | Safe | [
"CWE-264"
] | linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 2.4781552771898786e+38 | 5 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> | 0 |
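The fix the message describes is a small pattern: snapshot the pmd into a local, put a compiler barrier after the read, and make every later decision against the snapshot. A stand-alone sketch with stand-in types (pmd_t and the predicates are simplified, not the real kernel definitions):

```c
typedef unsigned long pmd_t;

#define barrier() __asm__ __volatile__("" ::: "memory")

static int pmd_none_val(pmd_t v)       { return v == 0; }
static int pmd_trans_huge_val(pmd_t v) { return (v & 0x1UL) != 0; } /* fake flag */

/* Returns nonzero when the caller must not walk the pte level. */
int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;   /* one snapshot; concurrent faults may change *pmd */
    barrier();             /* keep the compiler from re-reading *pmd below */

    if (pmd_none_val(pmdval) || pmd_trans_huge_val(pmdval))
        return 1;
    return 0;              /* stable, present, non-huge: safe to descend */
}
```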
GF_Box *gnrv_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_GenericVisualSampleEntryBox, GF_ISOM_BOX_TYPE_GNRV);
gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox*) tmp);
return (GF_Box *)tmp;
} | Safe | [
"CWE-787"
] | gpac | 388ecce75d05e11fc8496aa4857b91245007d26e | 2.560265202727491e+38 | 6 | fixed #1587 | 0 |
explicit UserDataLink(base::WeakPtr<WebContents> contents)
: web_contents(contents) {} | Safe | [] | electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 7.447632780040524e+37 | 2 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]> | 0 |
rpcapd_recv(SOCKET sock, char *buffer, size_t toread, uint32 *plen, char *errmsgbuf)
{
int nread;
char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors
if (toread > *plen)
{
// Tell the client and continue.
pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Message payload is too short");
return -2;
}
nread = sock_recv(sock, buffer, toread,
SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf, PCAP_ERRBUF_SIZE);
if (nread == -1)
{
rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf);
return -1;
}
*plen -= nread;
return 0;
} | Safe | [
"CWE-703",
"CWE-918"
] | libpcap | 33834cb2a4d035b52aa2a26742f832a112e90a0a | 2.9929547424322407e+38 | 21 | In the open request, reject capture sources that are URLs.
You shouldn't be able to ask a server to open a remote device on some
*other* server; just open it yourself.
This addresses Include Security issue F13: [libpcap] Remote Packet
Capture Daemon Allows Opening Capture URLs. | 0 |
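A hedged sketch of the rejection described in the message: if the requested capture source itself looks like a remote URL, the open request is refused. The scheme check is an assumption, not the exact rpcapd predicate:

```c
#include <string.h>

/* Return 1 if 'source' contains a URL scheme separator. */
int source_is_url(const char *source)
{
    return strstr(source, "://") != NULL;
}

/* In the open handler: if (source_is_url(name)) send an error reply
 * instead of opening the device on behalf of the client. */
```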
static uint32_t avifSampleTableGetImageDelta(const avifSampleTable * sampleTable, int imageIndex)
{
int maxSampleIndex = 0;
for (uint32_t i = 0; i < sampleTable->timeToSamples.count; ++i) {
const avifSampleTableTimeToSample * timeToSample = &sampleTable->timeToSamples.timeToSample[i];
maxSampleIndex += timeToSample->sampleCount;
if ((imageIndex < maxSampleIndex) || (i == (sampleTable->timeToSamples.count - 1))) {
return timeToSample->sampleDelta;
}
}
// TODO: fail here?
return 1;
} | Safe | [
"CWE-703",
"CWE-787"
] | libavif | 0a8e7244d494ae98e9756355dfbfb6697ded2ff9 | 1.7568278092504455e+38 | 14 | Set max image size to 16384 * 16384
Fix https://crbug.com/oss-fuzz/24728 and
https://crbug.com/oss-fuzz/24734. | 0 |
SRC_SetReselectDistance(double distance)
{
if (reselect_distance != distance) {
reselect_distance = distance;
LOG(LOGS_INFO, "New reselect distance %f", distance);
}
} | Safe | [
"CWE-59"
] | chrony | e18903a6b56341481a2e08469c0602010bf7bfe3 | 1.1714130182002768e+38 | 7 | switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions. | 0 |
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
int result = 0;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_PREPARED:
if (atomic_read(&substream->mmap_count))
result = -EBADFD;
break;
default:
result = -EBADFD;
break;
}
snd_pcm_stream_unlock_irq(substream);
if (result)
goto unlock;
result = do_hw_free(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
unlock:
mutex_unlock(&runtime->buffer_mutex);
return result;
} | Safe | [
"CWE-125"
] | linux | 92ee3c60ec9fe64404dc035e7c41277d74aa26cb | 9.879269016867602e+37 | 30 | ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
Currently we have neither proper check nor protection against the
concurrent calls of PCM hw_params and hw_free ioctls, which may result
in a UAF. Since the existing PCM stream lock can't be used for
protecting the whole ioctl operations, we need a new mutex to protect
those racy calls.
This patch introduced a new mutex, runtime->buffer_mutex, and applies
it to both hw_params and hw_free ioctl code paths. Along with it, the
both functions are slightly modified (the mmap_count check is moved
into the state-check block) for code simplicity.
Reported-by: Hu Jiahui <[email protected]>
Cc: <[email protected]>
Reviewed-by: Jaroslav Kysela <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> | 0 |
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
ssize_t len)
{
handle_t *handle;
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
struct ext4_map_blocks map;
unsigned int credits, blkbits = inode->i_blkbits;
map.m_lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
map.m_lblk);
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
while (ret >= 0 && ret < max_blocks) {
map.m_lblk += ret;
map.m_len = (max_blocks -= ret);
handle = ext4_journal_start(inode, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0) {
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_map_blocks "
"returned error inode#%lu, block=%u, "
"max_blocks=%u", __func__,
inode->i_ino, map.m_lblk, map.m_len);
}
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2 )
break;
}
return ret > 0 ? ret2 : ret;
} | Safe | [
"CWE-703"
] | linux | 667eff35a1f56fa74ce98a0c7c29a40adc1ba4e3 | 3.05919713829968e+38 | 45 | ext4: reimplement convert and split_unwritten
Reimplement ext4_ext_convert_to_initialized() and
ext4_split_unwritten_extents() using ext4_split_extent()
Signed-off-by: Yongqiang Yang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Tested-by: Allison Henderson <[email protected]> | 0 |
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
__wsum base)
{
return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
} | Safe | [
"CWE-362"
] | linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 7.550909768270244e+36 | 7 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> | 0 |
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
#ifdef CONFIG_X86_IO_APIC
if (use_pci_vector() && !platform_legacy_irq(gsi))
*irq = IO_APIC_VECTOR(gsi);
else
#endif
*irq = gsi_irq_sharing(gsi);
return 0;
} | Safe | [] | linux-2.6 | f0f4c3432e5e1087b3a8c0e6bd4113d3c37497ff | 1.6108004840157374e+38 | 10 | [PATCH] i386: add HPET(s) into resource map
Add HPET(s) into resource map. This will allow for the HPET(s) to be
visibile within /proc/iomem.
Signed-off-by: Aaron Durbin <[email protected]>
Signed-off-by: Andi Kleen <[email protected]> | 0 |
static void get_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
atomic_inc(&key->private.mm->mm_count);
break;
}
} | Safe | [
"CWE-20"
] | linux | 6f7b0a2a5c0fb03be7c25bd1745baa50582348ef | 3.0425498852062388e+38 | 14 | futex: Forbid uaddr == uaddr2 in futex_wait_requeue_pi()
If uaddr == uaddr2, then we have broken the rule of only requeueing
from a non-pi futex to a pi futex with this call. If we attempt this,
as the trinity test suite manages to do, we miss early wakeups as
q.key is equal to key2 (because they are the same uaddr). We will then
attempt to dereference the pi_mutex (which would exist had the futex_q
been properly requeued to a pi futex) and trigger a NULL pointer
dereference.
Signed-off-by: Darren Hart <[email protected]>
Cc: Dave Jones <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/ad82bfe7f7d130247fbe2b5b4275654807774227.1342809673.git.dvhart@linux.intel.com
Signed-off-by: Thomas Gleixner <[email protected]> | 0 |
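The guard implied by the message is a single early check; a sketch with simplified types (the real function operates on futex state, not bare pointers):

```c
#include <errno.h>

/* Requeueing a futex onto itself breaks the non-pi -> pi rule; bail out. */
int check_requeue_pi_addrs(const void *uaddr, const void *uaddr2)
{
    if (uaddr == uaddr2)
        return -EINVAL;
    return 0;
}
```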
static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
struct ipip_net *ipn = net_generic(dev_net(dev), ipip_net_id);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
dev_hold(dev);
ipn->tunnels_wc[0] = tunnel;
} | Safe | [] | linux-2.6 | d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978 | 1.989242336329253e+38 | 16 | tunnels: fix netns vs proto registration ordering
Same stuff as in ip_gre patch: receive hook can be called before netns
setup is done, oopsing in net_generic().
Signed-off-by: Alexey Dobriyan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> | 0 |
dissect_tcpopt_default_option(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int proto, int ett)
{
proto_item *item;
proto_tree *exp_tree;
proto_item *length_item;
int offset = 0;
item = proto_tree_add_item(tree, proto, tvb, offset, -1, ENC_NA);
exp_tree = proto_item_add_subtree(item, ett);
proto_tree_add_item(exp_tree, hf_tcp_option_kind, tvb, offset, 1, ENC_BIG_ENDIAN);
length_item = proto_tree_add_item(exp_tree, hf_tcp_option_len, tvb, offset + 1, 1, ENC_BIG_ENDIAN);
if (!tcp_option_len_check(length_item, pinfo, tvb_reported_length(tvb), 2))
return tvb_captured_length(tvb);
return tvb_captured_length(tvb);
} | Safe | [
"CWE-354"
] | wireshark | 7f3fe6164a68b76d9988c4253b24d43f498f1753 | 2.3836388323991605e+38 | 18 | TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in proto_checksum_vals[)array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c | 0 |
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
} | Safe | [] | net-next | fdf5af0daf8019cec2396cdef8fb042d80fe71fa | 2.31276420473574e+38 | 4 | tcp: drop SYN+FIN messages
Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his
linux machines to their limits.
Dont call conn_request() if the TCP flags includes SYN flag
Reported-by: Denys Fedoryshchenko <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> | 0 |
static inline void stop_process_timers(struct signal_struct *sig)
{
struct thread_group_cputimer *cputimer = &sig->cputimer;
/* Turn off cputimer->running. This is done without locking. */
WRITE_ONCE(cputimer->running, false);
tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
} | Safe | [
"CWE-190"
] | linux | 78c9c4dfbf8c04883941445a195276bb4bb92c76 | 1.9981096443320803e+37 | 8 | posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected] | 0 |
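A sketch of the clamping helper the message describes: the 64-bit internal count is reduced to [0, INT_MAX] before being exposed via timer_getoverrun(2) or si_overrun, with INT_MAX doubling as the saturation marker (the helper name is an assumption):

```c
#include <stdint.h>
#include <limits.h>

int overrun_clamp_to_int(int64_t overrun)
{
    if (overrun < 0)
        return 0;
    if (overrun > INT_MAX)
        return INT_MAX;   /* signals to user space that the value saturated */
    return (int)overrun;
}
```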
static int avi_write_data(avi_t *AVI, char *data, unsigned int length, int audio, int keyframe)
{
int n = 0;
unsigned char astr[5];
// transcode core itself checks for the size -- unneeded and
// does harm to xvid 2pass encodes where the first pass can get
// _very_ large -- tibit.
#if 0
/* Check for maximum file length */
if ( (AVI->pos + 8 + length + 8 + (AVI->n_idx+1)*16) > AVI_MAX_LEN ) {
AVI_errno = AVI_ERR_SIZELIM;
return -1;
}
#endif
/* Add index entry */
//set tag for current audio track
sprintf((char *)astr, "0%1dwb", (int)(AVI->aptr+1));
if(audio) {
if (!AVI->is_opendml) n = avi_add_index_entry(AVI,astr,0x10,AVI->pos,length);
n += avi_add_odml_index_entry(AVI,astr,0x10,AVI->pos,length);
} else {
if (!AVI->is_opendml) n = avi_add_index_entry(AVI,(unsigned char *)"00db",((keyframe)?0x10:0x0),AVI->pos,length);
n += avi_add_odml_index_entry(AVI,(unsigned char *)"00db",((keyframe)?0x10:0x0),AVI->pos,length);
}
if(n) return -1;
/* Output tag and data */
if(audio)
n = avi_add_chunk(AVI,(unsigned char *)astr, (unsigned char *)data, length);
else
n = avi_add_chunk(AVI,(unsigned char *)"00db", (unsigned char *)data, length);
if (n) return -1;
return 0;
} | Safe | [
"CWE-835"
] | gpac | 7f060bbb72966cae80d6fee338d0b07fa3fc06e1 | 8.60219612066794e+37 | 45 | fixed #2159 | 0 |
void __fastcall TSCPFileSystem::SCPDirectorySource(const UnicodeString DirectoryName,
const UnicodeString TargetDir, const TCopyParamType * CopyParam, int Params,
TFileOperationProgressType * OperationProgress, int Level)
{
int Attrs;
FTerminal->LogEvent(FORMAT(L"Entering directory \"%s\".", (DirectoryName)));
OperationProgress->SetFile(DirectoryName);
UnicodeString DestFileName =
FTerminal->ChangeFileName(
CopyParam, ExtractFileName(DirectoryName), osLocal, Level == 0);
// Get directory attributes
FILE_OPERATION_LOOP_BEGIN
{
Attrs = FileGetAttrFix(ApiPath(DirectoryName));
if (Attrs < 0) RaiseLastOSError();
}
FILE_OPERATION_LOOP_END(FMTLOAD(CANT_GET_ATTRS, (DirectoryName)));
UnicodeString TargetDirFull = UnixIncludeTrailingBackslash(TargetDir + DestFileName);
UnicodeString Buf;
/* TODO 1: maybe send filetime */
// Send directory modes (rights), filesize and file name
Buf = FORMAT(L"D%s 0 %s",
(CopyParam->RemoteFileRights(Attrs).Octal, DestFileName));
FSecureShell->SendLine(Buf);
SCPResponse();
try
{
TSearchRecOwned SearchRec;
bool FindOK = FTerminal->LocalFindFirstLoop(IncludeTrailingBackslash(DirectoryName) + L"*.*", SearchRec);
while (FindOK && !OperationProgress->Cancel)
{
UnicodeString FileName = IncludeTrailingBackslash(DirectoryName) + SearchRec.Name;
try
{
if (SearchRec.IsRealFile())
{
SCPSource(FileName, TargetDirFull, CopyParam, Params, OperationProgress, Level + 1);
}
}
// Previously we caught ESkipFile, making error being displayed
// even when file was excluded by mask. Now the ESkipFile is special
// case without error message.
catch (EScpFileSkipped &E)
{
TQueryParams Params(qpAllowContinueOnError);
TSuspendFileOperationProgress Suspend(OperationProgress);
if (FTerminal->QueryUserException(FMTLOAD(COPY_ERROR, (FileName)), &E,
qaOK | qaAbort, &Params, qtError) == qaAbort)
{
OperationProgress->SetCancel(csCancel);
}
if (!FTerminal->HandleException(&E))
{
throw;
}
}
catch (ESkipFile &E)
{
// If ESkipFile occurs, just log it and continue with next file
TSuspendFileOperationProgress Suspend(OperationProgress);
if (!FTerminal->HandleException(&E))
{
throw;
}
}
FindOK = FTerminal->LocalFindNextLoop(SearchRec);
}
SearchRec.Close();
/* TODO : Delete also read-only directories. */
/* TODO : Show error message on failure. */
if (!OperationProgress->Cancel)
{
if (FLAGSET(Params, cpDelete))
{
RemoveDir(ApiPath(DirectoryName));
}
else if (CopyParam->ClearArchive && FLAGSET(Attrs, faArchive))
{
FILE_OPERATION_LOOP_BEGIN
{
THROWOSIFFALSE(FileSetAttr(ApiPath(DirectoryName), Attrs & ~faArchive) == 0);
}
FILE_OPERATION_LOOP_END(FMTLOAD(CANT_SET_ATTRS, (DirectoryName)));
}
}
}
__finally
{
if (FTerminal->Active)
{
// Tell remote side, that we're done.
FTerminal->LogEvent(FORMAT(L"Leaving directory \"%s\".", (DirectoryName)));
FSecureShell->SendLine(L"E");
SCPResponse();
}
}
}
| Safe | [
"CWE-20"
] | winscp | 49d876f2c5fc00bcedaa986a7cf6dedd6bf16f54 | 1.2783672833733267e+38 | 110 | Bug 1675: Prevent SCP server sending files that were not requested
https://winscp.net/tracker/1675
Source commit: 4aa587620973bf793fb6e783052277c0f7be4b55 | 0 |
static void free_ioctx_reqs(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
/* At this point we know that there are no any in-flight requests */
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
/* Synchronize against RCU protected table->table[] dereferences */
INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
queue_rcu_work(system_wq, &ctx->free_rwork);
} | Safe | [
"CWE-416"
] | linux | 84c4e1f89fefe70554da0ab33be72c9be7994379 | 3.249819654698919e+36 | 12 | aio: simplify - and fix - fget/fput for io_submit()
Al Viro root-caused a race where the IOCB_CMD_POLL handling of
fget/fput() could cause us to access the file pointer after it had
already been freed:
"In more details - normally IOCB_CMD_POLL handling looks so:
1) io_submit(2) allocates aio_kiocb instance and passes it to
aio_poll()
2) aio_poll() resolves the descriptor to struct file by req->file =
fget(iocb->aio_fildes)
3) aio_poll() sets ->woken to false and raises ->ki_refcnt of that
aio_kiocb to 2 (bumps by 1, that is).
4) aio_poll() calls vfs_poll(). After sanity checks (basically,
"poll_wait() had been called and only once") it locks the queue.
That's what the extra reference to iocb had been for - we know we
can safely access it.
5) With queue locked, we check if ->woken has already been set to
true (by aio_poll_wake()) and, if it had been, we unlock the
queue, drop a reference to aio_kiocb and bugger off - at that
point it's a responsibility to aio_poll_wake() and the stuff
called/scheduled by it. That code will drop the reference to file
in req->file, along with the other reference to our aio_kiocb.
6) otherwise, we see whether we need to wait. If we do, we unlock the
queue, drop one reference to aio_kiocb and go away - eventual
wakeup (or cancel) will deal with the reference to file and with
the other reference to aio_kiocb
7) otherwise we remove ourselves from waitqueue (still under the
queue lock), so that wakeup won't get us. No async activity will
be happening, so we can safely drop req->file and iocb ourselves.
If wakeup happens while we are in vfs_poll(), we are fine - aio_kiocb
won't get freed under us, so we can do all the checks and locking
safely. And we don't touch ->file if we detect that case.
However, vfs_poll() most certainly *does* touch the file it had been
given. So wakeup coming while we are still in ->poll() might end up
doing fput() on that file. That case is not too rare, and usually we
are saved by the still present reference from descriptor table - that
fput() is not the final one.
But if another thread closes that descriptor right after our fget()
and wakeup does happen before ->poll() returns, we are in trouble -
final fput() done while we are in the middle of a method:
Al also wrote a patch to take an extra reference to the file descriptor
to fix this, but I instead suggested we just streamline the whole file
pointer handling by submit_io() so that the generic aio submission code
simply keeps the file pointer around until the aio has completed.
Fixes: bfe4037e722e ("aio: implement IOCB_CMD_POLL")
Acked-by: Al Viro <[email protected]>
Reported-by: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> | 0 |
server_client_dispatch_command(struct client *c, struct imsg *imsg)
{
struct msg_command_data data;
char *buf;
size_t len;
struct cmd_list *cmdlist = NULL;
int argc;
char **argv, *cause;
if (c->flags & CLIENT_EXIT)
return;
if (imsg->hdr.len - IMSG_HEADER_SIZE < sizeof data)
fatalx("bad MSG_COMMAND size");
memcpy(&data, imsg->data, sizeof data);
buf = (char *)imsg->data + sizeof data;
len = imsg->hdr.len - IMSG_HEADER_SIZE - sizeof data;
if (len > 0 && buf[len - 1] != '\0')
fatalx("bad MSG_COMMAND string");
argc = data.argc;
if (cmd_unpack_argv(buf, len, argc, &argv) != 0) {
cause = xstrdup("command too long");
goto error;
}
if (argc == 0) {
argc = 1;
argv = xcalloc(1, sizeof *argv);
*argv = xstrdup("new-session");
}
if ((cmdlist = cmd_list_parse(argc, argv, NULL, 0, &cause)) == NULL) {
cmd_free_argv(argc, argv);
goto error;
}
cmd_free_argv(argc, argv);
cmdq_append(c, cmdq_get_command(cmdlist, NULL, NULL, 0));
cmdq_append(c, cmdq_get_callback(server_client_command_done, NULL));
cmd_list_free(cmdlist);
return;
error:
cmdq_append(c, cmdq_get_callback(server_client_command_error, cause));
if (cmdlist != NULL)
cmd_list_free(cmdlist);
c->flags |= CLIENT_EXIT;
} | Safe | [] | src | b32e1d34e10a0da806823f57f02a4ae6e93d756e | 7.109116216128935e+37 | 52 | evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547. | 0 |
gst_rtsp_watch_unref (GstRTSPWatch * watch)
{
g_return_if_fail (watch != NULL);
g_source_unref ((GSource *) watch);
} | Safe | [] | gst-plugins-base | f672277509705c4034bc92a141eefee4524d15aa | 8.1164064896409975e+37 | 6 | gstrtspconnection: Security loophole making heap overflow
The former code allowed an attacker to create a heap overflow by
sending a longer than allowed session id in a response and including a
semicolon to change the maximum length. With this change, the parser
will never go beyond 512 bytes. | 0 |
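The fix described above is a bounded copy: whatever length the peer advertises, at most a fixed number of bytes of the session id are taken. A minimal sketch with an assumed 512-byte buffer and hypothetical names:

```c
#include <string.h>

#define SESSION_ID_MAX 512

void copy_session_id(char dst[SESSION_ID_MAX], const char *header)
{
    size_t i = 0;

    /* stop at the parameter separator or the hard buffer limit */
    while (header[i] != '\0' && header[i] != ';' && i < SESSION_ID_MAX - 1) {
        dst[i] = header[i];
        i++;
    }
    dst[i] = '\0';
}
```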
void TY_(AddCharToLexer)( Lexer *lexer, uint c )
{
int i, err, count = 0;
tmbchar buf[10] = {0};
err = TY_(EncodeCharToUTF8Bytes)( c, buf, NULL, &count );
if (err)
{
#if 0 && defined(_DEBUG)
fprintf( stderr, "lexer UTF-8 encoding error for U+%x : ", c );
#endif
/* replacement character 0xFFFD encoded as UTF-8 */
buf[0] = (byte) 0xEF;
buf[1] = (byte) 0xBF;
buf[2] = (byte) 0xBD;
count = 3;
}
for ( i = 0; i < count; ++i )
AddByte( lexer, buf[i] );
} | Safe | [
"CWE-119"
] | tidy-html5 | c18f27a58792f7fbd0b30a0ff50d6b40a82f940d | 3.3228214078991187e+38 | 21 | Issue #217 - avoid len going negative, ever... | 0 |
static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
{
} | Safe | [
"CWE-119"
] | linux | 29d6455178a09e1dc340380c582b13356227e8df | 2.9426155248543807e+38 | 3 | sched: panic on corrupted stack end
Until now, hitting this BUG_ON caused a recursive oops (because oops
handling involves do_exit(), which calls into the scheduler, which in
turn raises an oops), which caused stuff below the stack to be
overwritten until a panic happened (e.g. via an oops in interrupt
context, caused by the overwritten CPU index in the thread_info).
Just panic directly.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> | 0 |
static int do_decode(unsigned char *bytes, long nbytes,
const EXPECTED *expected, size_t expected_size,
const TEST_PACKAGE *package)
{
EXPECTED *enctst = NULL;
const unsigned char *start;
int ret = 0;
start = bytes;
enctst = package->d2i(NULL, &bytes, nbytes);
if (enctst == NULL) {
if (expected->success == 0) {
ret = 1;
ERR_clear_error();
} else {
ret = -1;
}
} else {
if (start + nbytes == bytes
&& memcmp(enctst, expected, expected_size) == 0)
ret = 1;
else
ret = 0;
}
package->ifree(enctst);
return ret;
} | Safe | [
"CWE-476"
] | openssl | 22b88fc9c0e22545401c0b34d24843883ea73fec | 1.5675222430018234e+38 | 28 | Add a test for encoding/decoding using an invalid ASN.1 Template
If you have a CHOICE type that it must use explicit tagging - otherwise
the template is invalid. We add tests for this.
Reviewed-by: Tomas Mraz <[email protected]> | 0 |
k5_asn1_decode_uint(const uint8_t *asn1, size_t len, uintmax_t *val)
{
uintmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
/* Check for negative values and check length. */
if ((asn1[0] & 0x80) || len > sizeof(uintmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0, n = 0; i < len; i++)
n = (n << 8) | asn1[i];
*val = n;
return 0;
} | Safe | [
"CWE-674",
"CWE-787"
] | krb5 | 57415dda6cf04e73ffc3723be518eddfae599bfd | 1.6665157377729325e+38 | 15 | Add recursion limit for ASN.1 indefinite lengths
The libkrb5 ASN.1 decoder supports BER indefinite lengths. It
computes the tag length using recursion; the lack of a recursion limit
allows an attacker to overrun the stack and cause the process to
crash. Reported by Demi Obenour.
CVE-2020-28196:
In MIT krb5 releases 1.11 and later, an unauthenticated attacker can
cause a denial of service for any client or server to which it can
send an ASN.1-encoded Kerberos message of sufficient length.
ticket: 8959 (new)
tags: pullup
target_version: 1.18-next
target_version: 1.17-next | 0 |
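The mitigation named in the message is an explicit recursion counter on the tag-length decoder. A skeleton of that shape (constant and names are assumptions; the actual BER indefinite-length decoding is elided):

```c
#define MAX_BER_DEPTH 32

/* Depth-limited walk over nested encodings; decoding details elided. */
static int walk_encoding(const unsigned char *p, size_t len, int depth)
{
    if (depth > MAX_BER_DEPTH)
        return -1;                      /* refuse pathological nesting */

    /* ... decode one tag and length here; when the length is the BER
     * indefinite form, the contents are themselves encodings ... */
    int has_nested_encoding = 0;        /* would be set by the real decoder */
    if (has_nested_encoding)
        return walk_encoding(p, len, depth + 1);

    return 0;
}
```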
static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk)
{
struct sctp_packet *packet;
struct sctp_transport *transport;
__u16 sport;
__u16 dport;
__u32 vtag;
/* Get the source and destination port from the inbound packet. */
sport = ntohs(chunk->sctp_hdr->dest);
dport = ntohs(chunk->sctp_hdr->source);
/* The V-tag is going to be the same as the inbound packet if no
* association exists, otherwise, use the peer's vtag.
*/
if (asoc) {
/* Special case the INIT-ACK as there is no peer's vtag
* yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT_ACK:
{
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(initack->init_hdr.init_tag);
break;
}
default:
vtag = asoc->peer.i.init_tag;
break;
}
} else {
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT:
{
sctp_init_chunk_t *init;
init = (sctp_init_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(init->init_hdr.init_tag);
break;
}
default:
vtag = ntohl(chunk->sctp_hdr->vtag);
break;
}
}
/* Make a transport for the bucket, Eliza... */
transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
if (!transport)
goto nomem;
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
sctp_sk(net->sctp.ctl_sock));
packet = sctp_packet_init(&transport->packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0);
return packet;
nomem:
return NULL;
} | Safe | [
"CWE-20",
"CWE-476"
] | linux | ec0223ec48a90cb605244b45f7c62de856403729 | 2.1829609883569297e+38 | 72 | net: sctp: fix sctp_sf_do_5_1D_ce to verify if we/peer is AUTH capable
RFC4895 introduced AUTH chunks for SCTP; during the SCTP
handshake RANDOM; CHUNKS; HMAC-ALGO are negotiated (CHUNKS
being optional though):
---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
<------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
A special case is when an endpoint requires COOKIE-ECHO
chunks to be authenticated:
---------- INIT[RANDOM; CHUNKS; HMAC-ALGO] ---------->
<------- INIT-ACK[RANDOM; CHUNKS; HMAC-ALGO] ---------
------------------ AUTH; COOKIE-ECHO ---------------->
<-------------------- COOKIE-ACK ---------------------
RFC4895, section 6.3. Receiving Authenticated Chunks says:
The receiver MUST use the HMAC algorithm indicated in
the HMAC Identifier field. If this algorithm was not
specified by the receiver in the HMAC-ALGO parameter in
the INIT or INIT-ACK chunk during association setup, the
AUTH chunk and all the chunks after it MUST be discarded
and an ERROR chunk SHOULD be sent with the error cause
defined in Section 4.1. [...] If no endpoint pair shared
key has been configured for that Shared Key Identifier,
all authenticated chunks MUST be silently discarded. [...]
When an endpoint requires COOKIE-ECHO chunks to be
authenticated, some special procedures have to be followed
because the reception of a COOKIE-ECHO chunk might result
in the creation of an SCTP association. If a packet arrives
containing an AUTH chunk as a first chunk, a COOKIE-ECHO
chunk as the second chunk, and possibly more chunks after
them, and the receiver does not have an STCB for that
packet, then authentication is based on the contents of
the COOKIE-ECHO chunk. In this situation, the receiver MUST
authenticate the chunks in the packet by using the RANDOM
parameters, CHUNKS parameters and HMAC_ALGO parameters
obtained from the COOKIE-ECHO chunk, and possibly a local
shared secret as inputs to the authentication procedure
specified in Section 6.3. If authentication fails, then
the packet is discarded. If the authentication is successful,
the COOKIE-ECHO and all the chunks after the COOKIE-ECHO
MUST be processed. If the receiver has an STCB, it MUST
process the AUTH chunk as described above using the STCB
from the existing association to authenticate the
COOKIE-ECHO chunk and all the chunks after it. [...]
Commit bbd0d59809f9 introduced the possibility to receive
and verification of AUTH chunk, including the edge case for
authenticated COOKIE-ECHO. On reception of COOKIE-ECHO,
the function sctp_sf_do_5_1D_ce() handles processing,
unpacks and creates a new association if it passed sanity
checks and also tests for authentication chunks being
present. After a new association has been processed, it
invokes sctp_process_init() on the new association and
walks through the parameter list it received from the INIT
chunk. It checks SCTP_PARAM_RANDOM, SCTP_PARAM_HMAC_ALGO
and SCTP_PARAM_CHUNKS, and copies them into asoc->peer
meta data (peer_random, peer_hmacs, peer_chunks) in case
sysctl -w net.sctp.auth_enable=1 is set. If in INIT's
SCTP_PARAM_SUPPORTED_EXT parameter SCTP_CID_AUTH is set,
peer_random != NULL and peer_hmacs != NULL the peer is to be
assumed asoc->peer.auth_capable=1, in any other case
asoc->peer.auth_capable=0.
Now, if in sctp_sf_do_5_1D_ce() chunk->auth_chunk is
available, we set up a fake auth chunk and pass that on to
sctp_sf_authenticate(), which at latest in
sctp_auth_calculate_hmac() reliably dereferences a NULL pointer
at position 0..0008 when setting up the crypto key in
crypto_hash_setkey() by using asoc->asoc_shared_key that is
NULL as condition key_id == asoc->active_key_id is true if
the AUTH chunk was injected correctly from remote. This
happens no matter what net.sctp.auth_enable sysctl says.
The fix is to check for net->sctp.auth_enable and for
asoc->peer.auth_capable before doing any operations like
sctp_sf_authenticate() as no key is activated in
sctp_auth_asoc_init_active_key() for each case.
Now as RFC4895 section 6.3 states that if the used HMAC-ALGO
passed from the INIT chunk was not used in the AUTH chunk, we
SHOULD send an error; however in this case it would be better
to just silently discard such a maliciously prepared handshake
as we didn't even receive a parameter at all. Also, as our
endpoint has no shared key configured, section 6.3 says that
MUST silently discard, which we are doing from now onwards.
Before calling sctp_sf_pdiscard(), we need not only to free
the association, but also the chunk->auth_chunk skb, as
commit bbd0d59809f9 created a skb clone in that case.
I have tested this locally by using netfilter's nfqueue and
re-injecting packets into the local stack after maliciously
modifying the INIT chunk (removing RANDOM; HMAC-ALGO param)
and the SCTP packet containing the COOKIE_ECHO (injecting
AUTH chunk before COOKIE_ECHO). Fixed with this patch applied.
Fixes: bbd0d59809f9 ("[SCTP]: Implement the receive and verification of AUTH chunk")
Signed-off-by: Daniel Borkmann <[email protected]>
Cc: Vlad Yasevich <[email protected]>
Cc: Neil Horman <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> | 0 |
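The core of the fix described above is a capability gate before any HMAC work: authenticate only when authentication was actually negotiated, otherwise silently discard. A sketch with simplified fields (the real code checks net->sctp.auth_enable and asoc->peer.auth_capable inside sctp_sf_do_5_1D_ce):

```c
struct auth_state {
    int net_auth_enable;     /* sysctl: authentication supported at all   */
    int peer_auth_capable;   /* peer supplied RANDOM + HMAC-ALGO at setup */
};

/* 1: run authentication; 0: nothing to verify; -1: silently discard. */
int cookie_echo_auth_decision(const struct auth_state *a, int has_auth_chunk)
{
    if (!has_auth_chunk)
        return 0;
    if (!a->net_auth_enable || !a->peer_auth_capable)
        return -1;           /* no key can exist; never reach the HMAC path */
    return 1;
}
```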
GF_Err gf_isom_remove_edit(GF_ISOFile *movie, u32 trackNumber, u32 seg_index)
{
GF_Err e;
GF_TrackBox *trak;
GF_EdtsEntry *ent, *next_ent;
trak = gf_isom_get_track_from_file(movie, trackNumber);
if (!trak || !seg_index) return GF_BAD_PARAM;
e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
if (e) return e;
if (!trak->editBox || !trak->editBox->editList) return GF_OK;
if (gf_list_count(trak->editBox->editList->entryList)<=1) return gf_isom_remove_edits(movie, trackNumber);
ent = (GF_EdtsEntry*) gf_list_get(trak->editBox->editList->entryList, seg_index-1);
gf_list_rem(trak->editBox->editList->entryList, seg_index-1);
next_ent = (GF_EdtsEntry *)gf_list_get(trak->editBox->editList->entryList, seg_index-1);
if (next_ent) next_ent->segmentDuration += ent->segmentDuration;
gf_free(ent);
return SetTrackDuration(trak);
} | Safe | [
"CWE-476"
] | gpac | ebfa346eff05049718f7b80041093b4c5581c24e | 2.3637239582710946e+38 | 21 | fixed #1706 | 0 |
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
int i;
u16 j;
struct hc_sp_status_block_data sp_sb_data;
int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
u16 start = 0, end = 0;
u8 cos;
#endif
if (IS_PF(bp) && disable_int)
bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
bp->eth_stats.unrecoverable_error++;
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
BNX2X_ERR("begin crash dump -----------------\n");
/* Indices */
/* Common */
if (IS_PF(bp)) {
struct host_sp_status_block *def_sb = bp->def_status_blk;
int data_size, cstorm_offset;
BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
bp->def_idx, bp->def_att_idx, bp->attn_state,
bp->spq_prod_idx, bp->stats_counter);
BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
def_sb->atten_status_block.attn_bits,
def_sb->atten_status_block.attn_bits_ack,
def_sb->atten_status_block.status_block_id,
def_sb->atten_status_block.attn_bits_index);
BNX2X_ERR(" def (");
for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
pr_cont("0x%x%s",
def_sb->sp_sb.index_values[i],
(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
data_size = sizeof(struct hc_sp_status_block_data) /
sizeof(u32);
cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
for (i = 0; i < data_size; i++)
*((u32 *)&sp_sb_data + i) =
REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
i * sizeof(u32));
pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
sp_sb_data.igu_sb_id,
sp_sb_data.igu_seg_id,
sp_sb_data.p_func.pf_id,
sp_sb_data.p_func.vnic_id,
sp_sb_data.p_func.vf_id,
sp_sb_data.p_func.vf_valid,
sp_sb_data.state);
}
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
int loop;
struct hc_status_block_data_e2 sb_data_e2;
struct hc_status_block_data_e1x sb_data_e1x;
struct hc_status_block_sm *hc_sm_p =
CHIP_IS_E1x(bp) ?
sb_data_e1x.common.state_machine :
sb_data_e2.common.state_machine;
struct hc_index_data *hc_index_p =
CHIP_IS_E1x(bp) ?
sb_data_e1x.index_data :
sb_data_e2.index_data;
u8 data_size, cos;
u32 *sb_data_p;
struct bnx2x_fp_txdata txdata;
if (!bp->fp)
break;
if (!fp->rx_cons_sb)
continue;
/* Rx */
BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_hc_idx));
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
if (!fp->txdata_ptr[cos])
break;
txdata = *fp->txdata_ptr[cos];
if (!txdata.tx_cons_sb)
continue;
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
txdata.tx_bd_cons,
le16_to_cpu(*txdata.tx_cons_sb));
}
loop = CHIP_IS_E1x(bp) ?
HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
/* host sb data */
if (IS_FCOE_FP(fp))
continue;
BNX2X_ERR(" run indexes (");
for (j = 0; j < HC_SB_MAX_SM; j++)
pr_cont("0x%x%s",
fp->sb_running_index[j],
(j == HC_SB_MAX_SM - 1) ? ")" : " ");
BNX2X_ERR(" indexes (");
for (j = 0; j < loop; j++)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
/* VF cannot access FW refelection for status block */
if (IS_VF(bp))
continue;
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
sizeof(struct hc_status_block_data_e2);
data_size /= sizeof(u32);
sb_data_p = CHIP_IS_E1x(bp) ?
(u32 *)&sb_data_e1x :
(u32 *)&sb_data_e2;
/* copy sb data in here */
for (j = 0; j < data_size; j++)
*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
j * sizeof(u32));
if (!CHIP_IS_E1x(bp)) {
pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
sb_data_e2.common.p_func.pf_id,
sb_data_e2.common.p_func.vf_id,
sb_data_e2.common.p_func.vf_valid,
sb_data_e2.common.p_func.vnic_id,
sb_data_e2.common.same_igu_sb_1b,
sb_data_e2.common.state);
} else {
pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
sb_data_e1x.common.p_func.pf_id,
sb_data_e1x.common.p_func.vf_id,
sb_data_e1x.common.p_func.vf_valid,
sb_data_e1x.common.p_func.vnic_id,
sb_data_e1x.common.same_igu_sb_1b,
sb_data_e1x.common.state);
}
/* SB_SMs data */
for (j = 0; j < HC_SB_MAX_SM; j++) {
pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
j, hc_sm_p[j].__flags,
hc_sm_p[j].igu_sb_id,
hc_sm_p[j].igu_seg_id,
hc_sm_p[j].time_to_expire,
hc_sm_p[j].timer_value);
}
/* Indices data */
for (j = 0; j < loop; j++) {
pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
hc_index_p[j].flags,
hc_index_p[j].timeout);
}
}
#ifdef BNX2X_STOP_ON_ERROR
if (IS_PF(bp)) {
/* event queue */
BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
for (i = 0; i < NUM_EQ_DESC; i++) {
u32 *data = (u32 *)&bp->eq_ring[i].message.data;
BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
i, bp->eq_ring[i].message.opcode,
bp->eq_ring[i].message.error);
BNX2X_ERR("data: %x %x %x\n",
data[0], data[1], data[2]);
}
}
/* Rings */
/* Rx */
for_each_valid_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
if (!bp->fp)
break;
if (!fp->rx_cons_sb)
continue;
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
for (j = start; j != end; j = RX_BD(j + 1)) {
u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
i, j, rx_bd[1], rx_bd[0], sw_bd->data);
}
start = RX_SGE(fp->rx_sge_prod);
end = RX_SGE(fp->last_max_sge);
for (j = start; j != end; j = RX_SGE(j + 1)) {
u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
i, j, rx_sge[1], rx_sge[0], sw_page->page);
}
start = RCQ_BD(fp->rx_comp_cons - 10);
end = RCQ_BD(fp->rx_comp_cons + 503);
for (j = start; j != end; j = RCQ_BD(j + 1)) {
u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
}
}
/* Tx */
for_each_valid_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
if (!bp->fp)
break;
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
if (!fp->txdata_ptr[cos])
break;
if (!txdata->tx_cons_sb)
continue;
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
for (j = start; j != end; j = TX_BD(j + 1)) {
struct sw_tx_bd *sw_bd =
&txdata->tx_buf_ring[j];
BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
i, cos, j, sw_bd->skb,
sw_bd->first_bd);
}
start = TX_BD(txdata->tx_bd_cons - 10);
end = TX_BD(txdata->tx_bd_cons + 254);
for (j = start; j != end; j = TX_BD(j + 1)) {
u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
i, cos, j, tx_bd[0], tx_bd[1],
tx_bd[2], tx_bd[3]);
}
}
}
#endif
if (IS_PF(bp)) {
bnx2x_fw_dump(bp);
bnx2x_mc_assert(bp);
}
BNX2X_ERR("end crash dump -----------------\n");
} | Safe | [
"CWE-20"
] | linux | 8914a595110a6eca69a5e275b323f5d09e18f4f9 | 3.2961433495817287e+38 | 282 | bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> | 0 |
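The fix this commit message describes boils down to a per-packet feature check that strips GSO from oversized packets before they reach the firmware. The sketch below is illustrative only, not the verbatim upstream patch: it assumes the existing kernel helpers skb_is_gso() and skb_gso_validate_mac_len(), and the name BNX2X_MAX_PACKET_SIZE is invented here for the ~9700-byte limit quoted in the message.

```c
/* Hedged sketch of the check described above -- not the exact upstream
 * patch.  skb_is_gso(), skb_gso_validate_mac_len() and NETIF_F_GSO_MASK
 * are existing kernel symbols; BNX2X_MAX_PACKET_SIZE is named here only
 * for illustration, using the limit from the commit message. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define BNX2X_MAX_PACKET_SIZE 9700

static netdev_features_t bnx2x_features_check_sketch(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features)
{
	/* If any resulting segment (MAC header + payload) would exceed the
	 * firmware limit, drop the GSO feature bits so the stack falls back
	 * to software segmentation instead of asserting the firmware. */
	if (skb_is_gso(skb) &&
	    !skb_gso_validate_mac_len(skb, BNX2X_MAX_PACKET_SIZE))
		features &= ~NETIF_F_GSO_MASK;

	return features;
}
```

A hook of this shape would be wired into the driver's net_device_ops as .ndo_features_check so it runs on the transmit path for every packet.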
int ssl3_change_cipher_state(SSL *s, int which)
{
unsigned char *p,*mac_secret;
unsigned char exp_key[EVP_MAX_KEY_LENGTH];
unsigned char exp_iv[EVP_MAX_IV_LENGTH];
unsigned char *ms,*key,*iv,*er1,*er2;
EVP_CIPHER_CTX *dd;
const EVP_CIPHER *c;
#ifndef OPENSSL_NO_COMP
COMP_METHOD *comp;
#endif
const EVP_MD *m;
EVP_MD_CTX md;
int is_exp,n,i,j,k,cl;
int reuse_dd = 0;
is_exp=SSL_C_IS_EXPORT(s->s3->tmp.new_cipher);
c=s->s3->tmp.new_sym_enc;
m=s->s3->tmp.new_hash;
/* m == NULL will lead to a crash later */
OPENSSL_assert(m);
#ifndef OPENSSL_NO_COMP
if (s->s3->tmp.new_compression == NULL)
comp=NULL;
else
comp=s->s3->tmp.new_compression->method;
#endif
if (which & SSL3_CC_READ)
{
if (s->enc_read_ctx != NULL)
reuse_dd = 1;
else if ((s->enc_read_ctx=OPENSSL_malloc(sizeof(EVP_CIPHER_CTX))) == NULL)
goto err;
else
/* make sure it's initialized in case we exit later with an error */
EVP_CIPHER_CTX_init(s->enc_read_ctx);
dd= s->enc_read_ctx;
ssl_replace_hash(&s->read_hash,m);
#ifndef OPENSSL_NO_COMP
/* COMPRESS */
if (s->expand != NULL)
{
COMP_CTX_free(s->expand);
s->expand=NULL;
}
if (comp != NULL)
{
s->expand=COMP_CTX_new(comp);
if (s->expand == NULL)
{
SSLerr(SSL_F_SSL3_CHANGE_CIPHER_STATE,SSL_R_COMPRESSION_LIBRARY_ERROR);
goto err2;
}
if (s->s3->rrec.comp == NULL)
s->s3->rrec.comp=(unsigned char *)
OPENSSL_malloc(SSL3_RT_MAX_PLAIN_LENGTH);
if (s->s3->rrec.comp == NULL)
goto err;
}
#endif
memset(&(s->s3->read_sequence[0]),0,8);
mac_secret= &(s->s3->read_mac_secret[0]);
}
else
{
if (s->enc_write_ctx != NULL)
reuse_dd = 1;
else if ((s->enc_write_ctx=OPENSSL_malloc(sizeof(EVP_CIPHER_CTX))) == NULL)
goto err;
else
/* make sure it's initialized in case we exit later with an error */
EVP_CIPHER_CTX_init(s->enc_write_ctx);
dd= s->enc_write_ctx;
ssl_replace_hash(&s->write_hash,m);
#ifndef OPENSSL_NO_COMP
/* COMPRESS */
if (s->compress != NULL)
{
COMP_CTX_free(s->compress);
s->compress=NULL;
}
if (comp != NULL)
{
s->compress=COMP_CTX_new(comp);
if (s->compress == NULL)
{
SSLerr(SSL_F_SSL3_CHANGE_CIPHER_STATE,SSL_R_COMPRESSION_LIBRARY_ERROR);
goto err2;
}
}
#endif
memset(&(s->s3->write_sequence[0]),0,8);
mac_secret= &(s->s3->write_mac_secret[0]);
}
if (reuse_dd)
EVP_CIPHER_CTX_cleanup(dd);
p=s->s3->tmp.key_block;
i=EVP_MD_size(m);
if (i < 0)
goto err2;
cl=EVP_CIPHER_key_length(c);
j=is_exp ? (cl < SSL_C_EXPORT_KEYLENGTH(s->s3->tmp.new_cipher) ?
cl : SSL_C_EXPORT_KEYLENGTH(s->s3->tmp.new_cipher)) : cl;
/* Was j=(is_exp)?5:EVP_CIPHER_key_length(c); */
k=EVP_CIPHER_iv_length(c);
if ( (which == SSL3_CHANGE_CIPHER_CLIENT_WRITE) ||
(which == SSL3_CHANGE_CIPHER_SERVER_READ))
{
ms= &(p[ 0]); n=i+i;
key= &(p[ n]); n+=j+j;
iv= &(p[ n]); n+=k+k;
er1= &(s->s3->client_random[0]);
er2= &(s->s3->server_random[0]);
}
else
{
n=i;
ms= &(p[ n]); n+=i+j;
key= &(p[ n]); n+=j+k;
iv= &(p[ n]); n+=k;
er1= &(s->s3->server_random[0]);
er2= &(s->s3->client_random[0]);
}
if (n > s->s3->tmp.key_block_length)
{
SSLerr(SSL_F_SSL3_CHANGE_CIPHER_STATE,ERR_R_INTERNAL_ERROR);
goto err2;
}
EVP_MD_CTX_init(&md);
memcpy(mac_secret,ms,i);
if (is_exp)
{
/* In here I set both the read and write key/iv to the
* same value since only the correct one will be used :-).
*/
EVP_DigestInit_ex(&md,EVP_md5(), NULL);
EVP_DigestUpdate(&md,key,j);
EVP_DigestUpdate(&md,er1,SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md,er2,SSL3_RANDOM_SIZE);
EVP_DigestFinal_ex(&md,&(exp_key[0]),NULL);
key= &(exp_key[0]);
if (k > 0)
{
EVP_DigestInit_ex(&md,EVP_md5(), NULL);
EVP_DigestUpdate(&md,er1,SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md,er2,SSL3_RANDOM_SIZE);
EVP_DigestFinal_ex(&md,&(exp_iv[0]),NULL);
iv= &(exp_iv[0]);
}
}
s->session->key_arg_length=0;
EVP_CipherInit_ex(dd,c,NULL,key,iv,(which & SSL3_CC_WRITE));
OPENSSL_cleanse(&(exp_key[0]),sizeof(exp_key));
OPENSSL_cleanse(&(exp_iv[0]),sizeof(exp_iv));
EVP_MD_CTX_cleanup(&md);
return(1);
err:
SSLerr(SSL_F_SSL3_CHANGE_CIPHER_STATE,ERR_R_MALLOC_FAILURE);
err2:
return(0);
} | Safe | [
"CWE-310"
] | openssl | e5420be6cd09af2550b128575a675490cfba0483 | 1.2033426858993208e+38 | 171 | Make CBC decoding constant time.
This patch makes the decoding of SSLv3 and TLS CBC records constant
time. Without this, a timing side-channel can be used to build a padding
oracle and mount Vaudenay's attack.
This patch also disables the stitched AESNI+SHA mode pending a similar
fix to that code.
In order to be easy to backport, this change is implemented in ssl/,
rather than as a generic AEAD mode. In the future this should be changed
around so that HMAC isn't in ssl/, but crypto/ as FIPS expects.
(cherry picked from commit e130841bccfc0bb9da254dc84e23bc6a1c78a64e)
Conflicts:
crypto/evp/c_allc.c
ssl/ssl_algs.c
ssl/ssl_locl.h
ssl/t1_enc.c | 0 |
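The core idea of this patch is that every byte the CBC padding check touches, and every branch it takes, must be independent of the attacker-influenced padding bytes. The standalone sketch below illustrates that style of masked, branch-free padding validation; it is not OpenSSL's actual implementation (the real code lives in ssl/s3_cbc.c and also copies and checks the MAC in constant time), the helper names are invented, and it deliberately ignores the MAC-size bookkeeping a real TLS record needs.

```c
/* Constant-time TLS-style CBC padding check -- an illustration of the
 * technique, not OpenSSL's code.  All helpers return 0xFFFFFFFF or 0 and
 * take no data-dependent branches; they assume operands below 2^31. */
#include <stddef.h>
#include <stdint.h>

static uint32_t ct_lt(uint32_t a, uint32_t b) { return 0u - ((a - b) >> 31); }
static uint32_t ct_ge(uint32_t a, uint32_t b) { return ~ct_lt(a, b); }
static uint32_t ct_eq(uint32_t a, uint32_t b)
{
    uint32_t c = a ^ b;
    return 0u - ((~c & (c - 1)) >> 31);   /* all-ones iff a == b */
}

/* rec/len: decrypted record (len >= 1).  Returns all-ones if the trailing
 * padding is well formed, zero otherwise, touching the same bytes and
 * executing the same instructions for every record of a given length. */
static uint32_t ct_check_padding(const unsigned char *rec, size_t len)
{
    uint32_t pad  = rec[len - 1];                   /* claimed pad length  */
    uint32_t good = ct_ge((uint32_t)len, pad + 1);  /* record long enough? */
    size_t i;

    /* Always scan a fixed window of up to 256 trailing bytes; bytes outside
     * the claimed padding are read but masked out of the verdict. */
    for (i = 0; i < 256 && i < len; i++) {
        uint32_t in_pad = ct_lt((uint32_t)i, pad + 1);
        uint32_t b      = rec[len - 1 - i];
        good &= ~in_pad | ct_eq(b, pad);
    }
    return good;
}
```

Resisting the full timing attack also requires computing and comparing the MAC in a way whose timing does not depend on the padding length; that MAC-handling half is the bulk of the actual patch.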
void FieldStore::add_trailer_token(const StringRef &name,
const StringRef &value, bool no_index,
int32_t token) {
// Header size limit should be applied to all header and trailer
// fields combined.
shrpx::add_header(buffer_size_, trailers_, name, value, no_index, token);
} | Safe | [] | nghttp2 | 319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c | 1.1614627929524952e+38 | 7 | nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full. | 0 |
Error(const std::string& text)
: ClientProtocol::Message("ERROR")
{
PushParam(text);
} | Safe | [
"CWE-200",
"CWE-732"
] | inspircd | 4350a11c663b0d75f8119743bffb7736d87abd4d | 3.5142928365041538e+37 | 5 | Fix sending malformed pong messages in some cases. | 0 |
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
bool decrypt = false;
BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
bbits = ilog2(blocksize);
block = (sector_t)page->index << (PAGE_SHIFT - bbits);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
continue;
}
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
if (PageUptodate(page)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
zero_user_segments(page, to, block_end,
block_start, from);
continue;
}
}
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode);
}
}
/*
* If we issued read requests, let them complete.
*/
while (wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
err = fscrypt_decrypt_page(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
return err;
} | Safe | [] | linux | 8e4b5eae5decd9dfe5a4ee369c22028f90ab4c44 | 3.119029152239049e+38 | 85 | ext4: fail ext4_iget for root directory if unallocated
If the root directory has an i_links_count of zero, then when the file
system is mounted, then when ext4_fill_super() notices the problem and
tries to call iput() the root directory in the error return path,
ext4_evict_inode() will try to free the inode on disk, before all of
the file system structures are set up, and this will result in an OOPS
caused by a NULL pointer dereference.
This issue has been assigned CVE-2018-1092.
https://bugzilla.kernel.org/show_bug.cgi?id=199179
https://bugzilla.redhat.com/show_bug.cgi?id=1560777
Reported-by: Wen Xu <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected] | 0 |
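The guard this message describes is small: when ext4_iget() reads the raw on-disk inode for the root directory and finds a link count of zero, it should reject the inode rather than hand back something the mount error path will later try to free. The fragment below is a hedged sketch of that check as it might sit inside ext4_iget() after the raw inode has been read; EXT4_ROOT_INO, EFSCORRUPTED and EXT4_ERROR_INODE are real ext4 symbols, while raw_inode, ret and the bad_inode label stand in for ext4_iget()'s own locals and are not reproduced from the upstream diff.

```c
/* Sketch of the check described above, placed inside ext4_iget() once the
 * on-disk inode (raw_inode) has been read -- not the verbatim patch. */
	if (ino == EXT4_ROOT_INO && raw_inode->i_links_count == 0) {
		/* A root directory that claims to be unallocated means the
		 * filesystem image is corrupt; fail the lookup here rather
		 * than let the mount error path iput() a half-built inode
		 * and oops in ext4_evict_inode(). */
		EXT4_ERROR_INODE(inode, "iget: root inode unallocated");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
```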
static void fromRGB(unsigned char *src, unsigned char *dst, int width,
int pitch, int height, int pixelFormat)
{
switch(pixelFormat)
{
case TJPF_RGB:
#if RGB_RED!=0 || RGB_GREEN!=1 || RGB_BLUE!=2 || RGB_PIXELSIZE!=3
FROMRGB(3, 0, 1, 2,);
#endif
break;
case TJPF_BGR:
#if RGB_RED!=2 || RGB_GREEN!=1 || RGB_BLUE!=0 || RGB_PIXELSIZE!=3
FROMRGB(3, 2, 1, 0,);
#endif
break;
case TJPF_RGBX:
#if RGB_RED!=0 || RGB_GREEN!=1 || RGB_BLUE!=2 || RGB_PIXELSIZE!=4
FROMRGB(4, 0, 1, 2,);
#endif
break;
case TJPF_RGBA:
#if RGB_RED!=0 || RGB_GREEN!=1 || RGB_BLUE!=2 || RGB_PIXELSIZE!=4
FROMRGB(4, 0, 1, 2, dst[3]=0xFF;);
#endif
break;
case TJPF_BGRX:
#if RGB_RED!=2 || RGB_GREEN!=1 || RGB_BLUE!=0 || RGB_PIXELSIZE!=4
FROMRGB(4, 2, 1, 0,);
#endif
break;
case TJPF_BGRA:
#if RGB_RED!=2 || RGB_GREEN!=1 || RGB_BLUE!=0 || RGB_PIXELSIZE!=4
FROMRGB(4, 2, 1, 0, dst[3]=0xFF;); return;
#endif
break;
case TJPF_XRGB:
#if RGB_RED!=1 || RGB_GREEN!=2 || RGB_BLUE!=3 || RGB_PIXELSIZE!=4
FROMRGB(4, 1, 2, 3,); return;
#endif
break;
case TJPF_ARGB:
#if RGB_RED!=1 || RGB_GREEN!=2 || RGB_BLUE!=3 || RGB_PIXELSIZE!=4
FROMRGB(4, 1, 2, 3, dst[0]=0xFF;); return;
#endif
break;
case TJPF_XBGR:
#if RGB_RED!=3 || RGB_GREEN!=2 || RGB_BLUE!=1 || RGB_PIXELSIZE!=4
FROMRGB(4, 3, 2, 1,); return;
#endif
break;
case TJPF_ABGR:
#if RGB_RED!=3 || RGB_GREEN!=2 || RGB_BLUE!=1 || RGB_PIXELSIZE!=4
FROMRGB(4, 3, 2, 1, dst[0]=0xFF;); return;
#endif
break;
}
} | Safe | [] | libjpeg-turbo | dab6be4cfb2f9307b5378d2d1dc74d9080383dc2 | 2.9821911030909858e+38 | 57 | tjDecompressToYUV*(): Fix OOB write/double free
... when attempting to decompress grayscale JPEG images with sampling
factors != 1.
Fixes #387 | 0 |
smtp_proceed_help(struct smtp_session *s, const char *args)
{
const char *code = esc_code(ESC_STATUS_OK, ESC_OTHER_STATUS);
smtp_reply(s, "214-%s This is " SMTPD_NAME, code);
smtp_reply(s, "214-%s To report bugs in the implementation, "
"please contact [email protected]", code);
smtp_reply(s, "214-%s with full details", code);
smtp_reply(s, "214 %s End of HELP info", code);
} | Safe | [
"CWE-78",
"CWE-252"
] | src | 9dcfda045474d8903224d175907bfc29761dcb45 | 2.4749059648799807e+37 | 10 | Fix a security vulnerability discovered by Qualys which can lead to a
privilege escalation on mbox deliveries and unprivileged code execution
on lmtp deliveries, due to a logic issue causing a sanity check to be
missed.
ok eric@, millert@ | 0 |
XkbSizeGeomSections(XkbGeometryPtr geom)
{
register int i, size;
XkbSectionPtr section;
for (i = size = 0, section = geom->sections; i < geom->num_sections;
i++, section++) {
size += SIZEOF(xkbSectionWireDesc);
if (section->rows) {
int r;
XkbRowPtr row;
for (r = 0, row = section->rows; r < section->num_rows; row++, r++) {
size += SIZEOF(xkbRowWireDesc);
size += row->num_keys * SIZEOF(xkbKeyWireDesc);
}
}
if (section->doodads)
size += XkbSizeGeomDoodads(section->num_doodads, section->doodads);
if (section->overlays) {
int o;
XkbOverlayPtr ol;
for (o = 0, ol = section->overlays; o < section->num_overlays;
o++, ol++) {
int r;
XkbOverlayRowPtr row;
size += SIZEOF(xkbOverlayWireDesc);
for (r = 0, row = ol->rows; r < ol->num_rows; r++, row++) {
size += SIZEOF(xkbOverlayRowWireDesc);
size += row->num_keys * SIZEOF(xkbOverlayKeyWireDesc);
}
}
}
}
return size;
} | Safe | [
"CWE-119"
] | xserver | f7cd1276bbd4fe3a9700096dec33b52b8440788d | 2.3640715362122838e+38 | 38 | Correct bounds checking in XkbSetNames()
CVE-2020-14345 / ZDI 11428
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]> | 0 |
R_API void r_bin_java_print_code_attr_summary(RBinJavaAttrInfo *attr) {
RListIter *iter = NULL, *iter_tmp = NULL;
RBinJavaExceptionEntry *exc_entry = NULL;
RBinJavaAttrInfo *_attr = NULL;
if (!attr) {
eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Code.\n");
return;
}
printf ("Code Attribute Information:\n");
printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
printf (" Attribute Length: %d, Attribute Count: %d\n", attr->length, attr->info.code_attr.attributes_count);
printf (" Max Stack: %d\n", attr->info.code_attr.max_stack);
printf (" Max Locals: %d\n", attr->info.code_attr.max_locals);
printf (" Code Length: %d\n", attr->info.code_attr.code_length);
printf (" Code At Offset: 0x%08"PFMT64x "\n", (ut64) attr->info.code_attr.code_offset);
printf ("Code Attribute Exception Table Information:\n");
printf (" Exception Table Length: %d\n", attr->info.code_attr.exception_table_length);
if (attr->info.code_attr.exception_table) {
// Delete the attr entries
r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) {
r_bin_java_print_code_exceptions_attr_summary (exc_entry);
}
}
printf (" Implicit Method Stack Frame:\n");
r_bin_java_print_stack_map_frame_summary (attr->info.code_attr.implicit_frame);
printf ("Code Attribute Attributes Information:\n");
if (attr->info.code_attr.attributes && attr->info.code_attr.attributes_count > 0) {
printf (" Code Attribute Attributes Count: %d\n", attr->info.code_attr.attributes_count);
r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) {
r_bin_java_print_attr_summary (_attr);
}
}
} | Safe | [
"CWE-119",
"CWE-788"
] | radare2 | 6c4428f018d385fc80a33ecddcb37becea685dd5 | 1.5993930559690538e+38 | 34 | Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class | 0 |