/* Interface to hashtable implementations.
   Copyright (C) 2006-2020 Free Software Foundation, Inc.

   This file is part of libctf.

   libctf is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not see
   <http://www.gnu.org/licenses/>.  */

#include <ctf-impl.h>
#include <string.h>
#include "libiberty.h"
#include "hashtab.h"

/* We have three hashtable implementations:

   - ctf_hash_* is an interface to a fixed-size hash from const char * ->
     ctf_id_t with number of elements specified at creation time, that should
     support addition of items but need not support removal.

   - ctf_dynhash_* is an interface to a dynamically-expanding hash with
     unknown size that should support addition of large numbers of items, and
     removal as well, and is used only at type-insertion time and during
     linking.

   - ctf_dynset_* is an interface to a dynamically-expanding hash that
     contains only keys: no values.

   These can be implemented by the same underlying hashmap if you wish.  */
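
/* Illustrative sketch only (not part of this file's API): the three families
   are created like this, using the string hash/eq helpers defined below.  The
   element count and freeing functions are invented for the example.

     ctf_hash_t *fixed = ctf_hash_create (100, ctf_hash_string,
                                          ctf_hash_eq_string);
     ctf_dynhash_t *map = ctf_dynhash_create (ctf_hash_string,
                                              ctf_hash_eq_string,
                                              free, free);
     ctf_dynset_t *set = ctf_dynset_create (htab_hash_string,
                                            ctf_dynset_eq_string, NULL);  */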

/* The helem is used for general key/value mappings in both the ctf_hash and
   ctf_dynhash: the owner may not have space allocated for it, and will be
   garbage (not NULL!) in that case.  */

typedef struct ctf_helem
{
  void *key;             /* Either a pointer, or a coerced ctf_id_t.  */
  void *value;           /* The value (possibly a coerced int).  */
  ctf_dynhash_t *owner;  /* The hash that owns us.  */
} ctf_helem_t;

/* Equally, the key_free and value_free may not exist.  */

struct ctf_dynhash
{
  struct htab *htab;
  ctf_hash_free_fun key_free;
  ctf_hash_free_fun value_free;
};

/* Hash and eq functions for the dynhash and hash.  */

unsigned int
ctf_hash_integer (const void *ptr)
{
  ctf_helem_t *hep = (ctf_helem_t *) ptr;

  return htab_hash_pointer (hep->key);
}

int
ctf_hash_eq_integer (const void *a, const void *b)
{
  ctf_helem_t *hep_a = (ctf_helem_t *) a;
  ctf_helem_t *hep_b = (ctf_helem_t *) b;

  return htab_eq_pointer (hep_a->key, hep_b->key);
}

unsigned int
ctf_hash_string (const void *ptr)
{
  ctf_helem_t *hep = (ctf_helem_t *) ptr;

  return htab_hash_string (hep->key);
}

int
ctf_hash_eq_string (const void *a, const void *b)
{
  ctf_helem_t *hep_a = (ctf_helem_t *) a;
  ctf_helem_t *hep_b = (ctf_helem_t *) b;

  return !strcmp ((const char *) hep_a->key, (const char *) hep_b->key);
}

/* Hash a type_key.  */
unsigned int
ctf_hash_type_key (const void *ptr)
{
  ctf_helem_t *hep = (ctf_helem_t *) ptr;
  ctf_link_type_key_t *k = (ctf_link_type_key_t *) hep->key;

  return htab_hash_pointer (k->cltk_fp) + 59
    * htab_hash_pointer ((void *) (uintptr_t) k->cltk_idx);
}

int
ctf_hash_eq_type_key (const void *a, const void *b)
{
  ctf_helem_t *hep_a = (ctf_helem_t *) a;
  ctf_helem_t *hep_b = (ctf_helem_t *) b;
  ctf_link_type_key_t *key_a = (ctf_link_type_key_t *) hep_a->key;
  ctf_link_type_key_t *key_b = (ctf_link_type_key_t *) hep_b->key;

  return (key_a->cltk_fp == key_b->cltk_fp)
    && (key_a->cltk_idx == key_b->cltk_idx);
}

/* Hash a type_id_key.  */
unsigned int
ctf_hash_type_id_key (const void *ptr)
{
  ctf_helem_t *hep = (ctf_helem_t *) ptr;
  ctf_type_id_key_t *k = (ctf_type_id_key_t *) hep->key;

  return htab_hash_pointer ((void *) (uintptr_t) k->ctii_input_num)
    + 59 * htab_hash_pointer ((void *) (uintptr_t) k->ctii_type);
}

int
ctf_hash_eq_type_id_key (const void *a, const void *b)
{
  ctf_helem_t *hep_a = (ctf_helem_t *) a;
  ctf_helem_t *hep_b = (ctf_helem_t *) b;
  ctf_type_id_key_t *key_a = (ctf_type_id_key_t *) hep_a->key;
  ctf_type_id_key_t *key_b = (ctf_type_id_key_t *) hep_b->key;

  return (key_a->ctii_input_num == key_b->ctii_input_num)
    && (key_a->ctii_type == key_b->ctii_type);
}

/* Hash and eq functions for the dynset.  Most of these can just use the
   underlying hashtab functions directly.  */

int
ctf_dynset_eq_string (const void *a, const void *b)
{
  return !strcmp ((const char *) a, (const char *) b);
}

/* The dynhash, used for hashes whose size is not known at creation time.  */

/* Free a single ctf_helem with arbitrary key/value functions.  */

static void
ctf_dynhash_item_free (void *item)
{
  ctf_helem_t *helem = item;

  if (helem->owner->key_free && helem->key)
    helem->owner->key_free (helem->key);
  if (helem->owner->value_free && helem->value)
    helem->owner->value_free (helem->value);
  free (helem);
}

ctf_dynhash_t *
ctf_dynhash_create (ctf_hash_fun hash_fun, ctf_hash_eq_fun eq_fun,
                    ctf_hash_free_fun key_free, ctf_hash_free_fun value_free)
{
  ctf_dynhash_t *dynhash;
  htab_del del = ctf_dynhash_item_free;

  if (key_free || value_free)
    dynhash = malloc (sizeof (ctf_dynhash_t));
  else
    dynhash = malloc (offsetof (ctf_dynhash_t, key_free));
  if (!dynhash)
    return NULL;

  if (key_free == NULL && value_free == NULL)
    del = free;

  /* 7 is arbitrary and untested for now.  */
  if ((dynhash->htab = htab_create_alloc (7, (htab_hash) hash_fun, eq_fun,
                                          del, xcalloc, free)) == NULL)
    {
      free (dynhash);
      return NULL;
    }

  if (key_free || value_free)
    {
      dynhash->key_free = key_free;
      dynhash->value_free = value_free;
    }

  return dynhash;
}
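
/* Illustrative sketch only (not part of this file): typical dynhash usage
   with malloced string keys owned by the table and small integers coerced to
   values.  The key and value shown are invented for the example.

     ctf_dynhash_t *h = ctf_dynhash_create (ctf_hash_string,
                                            ctf_hash_eq_string, free, NULL);

     if (h != NULL
         && ctf_dynhash_insert (h, strdup ("foo"),
                                (void *) (uintptr_t) 42) == 0)
       {
         uintptr_t v = (uintptr_t) ctf_dynhash_lookup (h, "foo");
         ...
       }
     ctf_dynhash_destroy (h);  */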

static ctf_helem_t **
ctf_hashtab_lookup (struct htab *htab, const void *key, enum insert_option insert)
{
  ctf_helem_t tmp = { .key = (void *) key };
  return (ctf_helem_t **) htab_find_slot (htab, &tmp, insert);
}

static ctf_helem_t *
ctf_hashtab_insert (struct htab *htab, void *key, void *value,
                    ctf_hash_free_fun key_free,
                    ctf_hash_free_fun value_free)
{
  ctf_helem_t **slot;

  slot = ctf_hashtab_lookup (htab, key, INSERT);

  if (!slot)
    {
      errno = ENOMEM;
      return NULL;
    }

  if (!*slot)
    {
      /* Only spend space on the owner if we're going to use it: if there is a
         key or value freeing function.  */
      if (key_free || value_free)
        *slot = malloc (sizeof (ctf_helem_t));
      else
        *slot = malloc (offsetof (ctf_helem_t, owner));
      if (!*slot)
        return NULL;
      (*slot)->key = key;
    }
  else
    {
      if (key_free)
        key_free (key);
      if (value_free)
        value_free ((*slot)->value);
    }
  (*slot)->value = value;
  return *slot;
}

int
ctf_dynhash_insert (ctf_dynhash_t *hp, void *key, void *value)
{
  ctf_helem_t *slot;
  ctf_hash_free_fun key_free = NULL, value_free = NULL;

  if (hp->htab->del_f == ctf_dynhash_item_free)
    {
      key_free = hp->key_free;
      value_free = hp->value_free;
    }
  slot = ctf_hashtab_insert (hp->htab, key, value,
                             key_free, value_free);

  if (!slot)
    return errno;

  /* Keep track of the owner, so that the del function can get at the key_free
     and value_free functions.  Only do this if one of those functions is set:
     if not, the owner is not even present in the helem.  */

  if (key_free || value_free)
    slot->owner = hp;

  return 0;
}

void
ctf_dynhash_remove (ctf_dynhash_t *hp, const void *key)
{
  ctf_helem_t hep = { (void *) key, NULL, NULL };
  htab_remove_elt (hp->htab, &hep);
}

void
ctf_dynhash_empty (ctf_dynhash_t *hp)
{
  htab_empty (hp->htab);
}

size_t
ctf_dynhash_elements (ctf_dynhash_t *hp)
{
  return htab_elements (hp->htab);
}

void *
ctf_dynhash_lookup (ctf_dynhash_t *hp, const void *key)
{
  ctf_helem_t **slot;

  slot = ctf_hashtab_lookup (hp->htab, key, NO_INSERT);

  if (slot)
    return (*slot)->value;

  return NULL;
}

/* TRUE/FALSE return.  */
int
ctf_dynhash_lookup_kv (ctf_dynhash_t *hp, const void *key,
                       const void **orig_key, void **value)
{
  ctf_helem_t **slot;

  slot = ctf_hashtab_lookup (hp->htab, key, NO_INSERT);

  if (slot)
    {
      if (orig_key)
        *orig_key = (*slot)->key;
      if (value)
        *value = (*slot)->value;
      return 1;
    }
  return 0;
}
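
/* Illustrative sketch only: unlike ctf_dynhash_lookup, lookup_kv lets the
   caller distinguish "key absent" from "key present with a NULL value", and
   can also hand back the key pointer actually stored in the table.  The key
   string is invented for the example.

     void *val;
     const void *stored_key;

     if (ctf_dynhash_lookup_kv (h, "foo", &stored_key, &val))
       ... "foo" is present; val may legitimately be NULL ...
     else
       ... "foo" is not in the hash ...  */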

typedef struct ctf_traverse_cb_arg
{
  ctf_hash_iter_f fun;
  void *arg;
} ctf_traverse_cb_arg_t;

static int
ctf_hashtab_traverse (void **slot, void *arg_)
{
  ctf_helem_t *helem = *((ctf_helem_t **) slot);
  ctf_traverse_cb_arg_t *arg = (ctf_traverse_cb_arg_t *) arg_;

  arg->fun (helem->key, helem->value, arg->arg);
  return 1;
}

void
ctf_dynhash_iter (ctf_dynhash_t *hp, ctf_hash_iter_f fun, void *arg_)
{
  ctf_traverse_cb_arg_t arg = { fun, arg_ };
  htab_traverse (hp->htab, ctf_hashtab_traverse, &arg);
}
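
/* Illustrative sketch only: a ctf_hash_iter_f callback receives each
   key/value pair plus the caller's arg.  The counting callback below is
   invented for the example.

     static void
     count_cb (void *key, void *value, void *arg)
     {
       size_t *count = (size_t *) arg;
       (void) key;
       (void) value;
       (*count)++;
     }

     size_t count = 0;
     ctf_dynhash_iter (h, count_cb, &count);  */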

typedef struct ctf_traverse_find_cb_arg
{
  ctf_hash_iter_find_f fun;
  void *arg;
  void *found_key;
} ctf_traverse_find_cb_arg_t;

static int
ctf_hashtab_traverse_find (void **slot, void *arg_)
{
  ctf_helem_t *helem = *((ctf_helem_t **) slot);
  ctf_traverse_find_cb_arg_t *arg = (ctf_traverse_find_cb_arg_t *) arg_;

  if (arg->fun (helem->key, helem->value, arg->arg))
    {
      arg->found_key = helem->key;
      return 0;
    }
  return 1;
}

void *
ctf_dynhash_iter_find (ctf_dynhash_t *hp, ctf_hash_iter_find_f fun, void *arg_)
{
  ctf_traverse_find_cb_arg_t arg = { fun, arg_, NULL };
  htab_traverse (hp->htab, ctf_hashtab_traverse_find, &arg);
  return arg.found_key;
}

typedef struct ctf_traverse_remove_cb_arg
{
  struct htab *htab;
  ctf_hash_iter_remove_f fun;
  void *arg;
} ctf_traverse_remove_cb_arg_t;

static int
ctf_hashtab_traverse_remove (void **slot, void *arg_)
{
  ctf_helem_t *helem = *((ctf_helem_t **) slot);
  ctf_traverse_remove_cb_arg_t *arg = (ctf_traverse_remove_cb_arg_t *) arg_;

  if (arg->fun (helem->key, helem->value, arg->arg))
    htab_clear_slot (arg->htab, slot);
  return 1;
}

void
ctf_dynhash_iter_remove (ctf_dynhash_t *hp, ctf_hash_iter_remove_f fun,
                         void *arg_)
{
  ctf_traverse_remove_cb_arg_t arg = { hp->htab, fun, arg_ };
  htab_traverse (hp->htab, ctf_hashtab_traverse_remove, &arg);
}
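
/* Illustrative sketch only: a ctf_hash_iter_remove_f callback returns nonzero
   for the items that should be deleted.  The NULL-value filter below is
   invented for the example.

     static int
     remove_if_no_value (void *key, void *value, void *arg)
     {
       (void) key;
       (void) arg;
       return value == NULL;
     }

     ctf_dynhash_iter_remove (h, remove_if_no_value, NULL);  */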

/* Traverse a dynhash in arbitrary order, in _next iterator form.

   Mutating the dynhash while iterating is not supported (just as it isn't for
   htab_traverse).

   Note: unusually, this returns zero on success and a *positive* value on
   error, because it does not take an fp and taking an error pointer would be
   incredibly clunky.  Nearly all error handling ends up stuffing the result
   of this into some sort of errno or ctf_errno, which is invariably positive,
   so doing this simplifies essentially all callers.  */
int
ctf_dynhash_next (ctf_dynhash_t *h, ctf_next_t **it, void **key, void **value)
{
  ctf_next_t *i = *it;
  ctf_helem_t *slot;

  if (!i)
    {
      size_t size = htab_size (h->htab);

      /* If the table has too many entries to fit in an ssize_t, just give up.
         This might be spurious, but if any type-related hashtable has ever
         been nearly as large as that then something very odd is going on.  */
      if (((ssize_t) size) < 0)
        return EDOM;

      if ((i = ctf_next_create ()) == NULL)
        return ENOMEM;

      i->u.ctn_hash_slot = h->htab->entries;
      i->cu.ctn_h = h;
      i->ctn_n = 0;
      i->ctn_size = (ssize_t) size;
      i->ctn_iter_fun = (void (*) (void)) ctf_dynhash_next;
      *it = i;
    }

  if ((void (*) (void)) ctf_dynhash_next != i->ctn_iter_fun)
    return ECTF_NEXT_WRONGFUN;

  if (h != i->cu.ctn_h)
    return ECTF_NEXT_WRONGFP;

  if ((ssize_t) i->ctn_n == i->ctn_size)
    goto hash_end;

  while ((ssize_t) i->ctn_n < i->ctn_size
         && (*i->u.ctn_hash_slot == HTAB_EMPTY_ENTRY
             || *i->u.ctn_hash_slot == HTAB_DELETED_ENTRY))
    {
      i->u.ctn_hash_slot++;
      i->ctn_n++;
    }

  if ((ssize_t) i->ctn_n == i->ctn_size)
    goto hash_end;

  slot = *i->u.ctn_hash_slot;

  if (key)
    *key = slot->key;
  if (value)
    *value = slot->value;

  i->u.ctn_hash_slot++;
  i->ctn_n++;

  return 0;

 hash_end:
  ctf_next_destroy (i);
  *it = NULL;
  return ECTF_NEXT_END;
}
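
/* Illustrative sketch only: the usual way to drive this iterator.  The loop
   ends with ECTF_NEXT_END on success, and the iterator destroys itself at
   that point, so no cleanup is needed on the success path.

     ctf_next_t *i = NULL;
     void *k, *v;
     int err;

     while ((err = ctf_dynhash_next (h, &i, &k, &v)) == 0)
       {
         ... use k and v ...
       }
     if (err != ECTF_NEXT_END)
       ... report err ...  */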

/* Traverse a sorted dynhash, in _next iterator form.

   See ctf_dynhash_next for notes on error returns, etc.

   Sort keys before iterating over them using the SORT_FUN and SORT_ARG.

   If SORT_FUN is null, thunks to ctf_dynhash_next.  */
int
ctf_dynhash_next_sorted (ctf_dynhash_t *h, ctf_next_t **it, void **key,
                         void **value, ctf_hash_sort_f sort_fun, void *sort_arg)
{
  ctf_next_t *i = *it;

  if (sort_fun == NULL)
    return ctf_dynhash_next (h, it, key, value);

  if (!i)
    {
      size_t els = ctf_dynhash_elements (h);
      ctf_next_t *accum_i = NULL;
      void *key, *value;
      int err;
      ctf_next_hkv_t *walk;

      if (((ssize_t) els) < 0)
        return EDOM;

      if ((i = ctf_next_create ()) == NULL)
        return ENOMEM;

      if ((i->u.ctn_sorted_hkv = calloc (els, sizeof (ctf_next_hkv_t))) == NULL)
        {
          ctf_next_destroy (i);
          return ENOMEM;
        }
      walk = i->u.ctn_sorted_hkv;

      i->cu.ctn_h = h;

      while ((err = ctf_dynhash_next (h, &accum_i, &key, &value)) == 0)
        {
          walk->hkv_key = key;
          walk->hkv_value = value;
          walk++;
        }
      if (err != ECTF_NEXT_END)
        {
          ctf_next_destroy (i);
          return err;
        }

      if (sort_fun)
        ctf_qsort_r (i->u.ctn_sorted_hkv, els, sizeof (ctf_next_hkv_t),
                     (int (*) (const void *, const void *, void *)) sort_fun,
                     sort_arg);
      i->ctn_n = 0;
      i->ctn_size = (ssize_t) els;
      i->ctn_iter_fun = (void (*) (void)) ctf_dynhash_next_sorted;
      *it = i;
    }

  if ((void (*) (void)) ctf_dynhash_next_sorted != i->ctn_iter_fun)
    return ECTF_NEXT_WRONGFUN;

  if (h != i->cu.ctn_h)
    return ECTF_NEXT_WRONGFP;

  if ((ssize_t) i->ctn_n == i->ctn_size)
    {
      ctf_next_destroy (i);
      *it = NULL;
      return ECTF_NEXT_END;
    }

  if (key)
    *key = i->u.ctn_sorted_hkv[i->ctn_n].hkv_key;
  if (value)
    *value = i->u.ctn_sorted_hkv[i->ctn_n].hkv_value;
  i->ctn_n++;
  return 0;
}
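
/* Illustrative sketch only: sorted iteration, with a comparison function over
   the key/value pairs.  The string-key comparator below is invented for the
   example; ctf_next_hkv_t is the { hkv_key, hkv_value } pair sorted above.

     static int
     cmp_string_keys (const ctf_next_hkv_t *a, const ctf_next_hkv_t *b,
                      void *unused)
     {
       (void) unused;
       return strcmp ((const char *) a->hkv_key, (const char *) b->hkv_key);
     }

     ctf_next_t *i = NULL;
     void *k, *v;
     int err;

     while ((err = ctf_dynhash_next_sorted (h, &i, &k, &v,
                                            cmp_string_keys, NULL)) == 0)
       ... keys arrive in strcmp order ...  */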

void
ctf_dynhash_destroy (ctf_dynhash_t *hp)
{
  if (hp != NULL)
    htab_delete (hp->htab);
  free (hp);
}

/* The dynset, used for sets of keys with no value.  The implementation of
   this can be much simpler, because without a value the slot can simply be
   the stored key, which means we don't need to store the freeing functions
   and the dynset itself is just a htab.  */

ctf_dynset_t *
ctf_dynset_create (htab_hash hash_fun, htab_eq eq_fun,
                   ctf_hash_free_fun key_free)
{
  /* 7 is arbitrary and untested for now.  */
  return (ctf_dynset_t *) htab_create_alloc (7, (htab_hash) hash_fun, eq_fun,
                                             key_free, xcalloc, free);
}
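
/* Illustrative sketch only: typical dynset usage with string keys that the
   set does not own.  The keys are invented for the example.

     ctf_dynset_t *s = ctf_dynset_create (htab_hash_string,
                                          ctf_dynset_eq_string, NULL);

     if (s != NULL && ctf_dynset_insert (s, (void *) "foo") == 0)
       {
         if (ctf_dynset_exists (s, "foo", NULL))
           ... "foo" is a member ...
         ctf_dynset_remove (s, "foo");
       }
     ctf_dynset_destroy (s);  */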

/* The dynset has one complexity: the underlying implementation reserves two
   values for internal hash table implementation details (empty versus deleted
   entries).  These values are otherwise very useful for pointers cast to
   ints, so transform the value inserted into the ctf_dynset to allow for it.
   (This introduces an ambiguity in that one can no longer store these two
   values in the dynset, but if we pick high enough values this is very
   unlikely to be a problem.)

   We leak this implementation detail to the freeing functions on the grounds
   that any use of these functions is overwhelmingly likely to be in sets
   using real pointers, which will be unaffected.  */

#define DYNSET_EMPTY_ENTRY_REPLACEMENT ((void *) (uintptr_t) -64)
#define DYNSET_DELETED_ENTRY_REPLACEMENT ((void *) (uintptr_t) -63)

static void *
key_to_internal (const void *key)
{
  if (key == HTAB_EMPTY_ENTRY)
    return DYNSET_EMPTY_ENTRY_REPLACEMENT;
  else if (key == HTAB_DELETED_ENTRY)
    return DYNSET_DELETED_ENTRY_REPLACEMENT;

  return (void *) key;
}

static void *
internal_to_key (const void *internal)
{
  if (internal == DYNSET_EMPTY_ENTRY_REPLACEMENT)
    return HTAB_EMPTY_ENTRY;
  else if (internal == DYNSET_DELETED_ENTRY_REPLACEMENT)
    return HTAB_DELETED_ENTRY;
  return (void *) internal;
}

int
ctf_dynset_insert (ctf_dynset_t *hp, void *key)
{
  struct htab *htab = (struct htab *) hp;
  void **slot;

  slot = htab_find_slot (htab, key, INSERT);

  if (!slot)
    {
      errno = ENOMEM;
      return -errno;
    }

  if (*slot)
    {
      if (htab->del_f)
        (*htab->del_f) (*slot);
    }

  *slot = key_to_internal (key);

  return 0;
}

void
ctf_dynset_remove (ctf_dynset_t *hp, const void *key)
{
  htab_remove_elt ((struct htab *) hp, key_to_internal (key));
}

void
ctf_dynset_destroy (ctf_dynset_t *hp)
{
  if (hp != NULL)
    htab_delete ((struct htab *) hp);
}

void *
ctf_dynset_lookup (ctf_dynset_t *hp, const void *key)
{
  void **slot = htab_find_slot ((struct htab *) hp,
                                key_to_internal (key), NO_INSERT);

  if (slot)
    return internal_to_key (*slot);
  return NULL;
}

/* TRUE/FALSE return.  */
int
ctf_dynset_exists (ctf_dynset_t *hp, const void *key, const void **orig_key)
{
  void **slot = htab_find_slot ((struct htab *) hp,
                                key_to_internal (key), NO_INSERT);

  if (orig_key && slot)
    *orig_key = internal_to_key (*slot);
  return (slot != NULL);
}

/* Look up a completely random value from the set, if any exist.
   Keys with value zero cannot be distinguished from a nonexistent key.  */
void *
ctf_dynset_lookup_any (ctf_dynset_t *hp)
{
  struct htab *htab = (struct htab *) hp;
  void **slot = htab->entries;
  void **limit = slot + htab_size (htab);

  while (slot < limit
         && (*slot == HTAB_EMPTY_ENTRY || *slot == HTAB_DELETED_ENTRY))
    slot++;

  if (slot < limit)
    return internal_to_key (*slot);
  return NULL;
}

/* Traverse a dynset in arbitrary order, in _next iterator form.

   Otherwise, just like ctf_dynhash_next.  */
int
ctf_dynset_next (ctf_dynset_t *hp, ctf_next_t **it, void **key)
{
  struct htab *htab = (struct htab *) hp;
  ctf_next_t *i = *it;
  void *slot;

  if (!i)
    {
      size_t size = htab_size (htab);

      /* If the table has too many entries to fit in an ssize_t, just give up.
         This might be spurious, but if any type-related hashtable has ever
         been nearly as large as that then something very odd is going on.  */
      if (((ssize_t) size) < 0)
        return EDOM;

      if ((i = ctf_next_create ()) == NULL)
        return ENOMEM;

      i->u.ctn_hash_slot = htab->entries;
      i->cu.ctn_s = hp;
      i->ctn_n = 0;
      i->ctn_size = (ssize_t) size;
      i->ctn_iter_fun = (void (*) (void)) ctf_dynset_next;
      *it = i;
    }

  if ((void (*) (void)) ctf_dynset_next != i->ctn_iter_fun)
    return ECTF_NEXT_WRONGFUN;

  if (hp != i->cu.ctn_s)
    return ECTF_NEXT_WRONGFP;

  if ((ssize_t) i->ctn_n == i->ctn_size)
    goto set_end;

  while ((ssize_t) i->ctn_n < i->ctn_size
         && (*i->u.ctn_hash_slot == HTAB_EMPTY_ENTRY
             || *i->u.ctn_hash_slot == HTAB_DELETED_ENTRY))
    {
      i->u.ctn_hash_slot++;
      i->ctn_n++;
    }

  if ((ssize_t) i->ctn_n == i->ctn_size)
    goto set_end;

  slot = *i->u.ctn_hash_slot;

  if (key)
    *key = internal_to_key (slot);

  i->u.ctn_hash_slot++;
  i->ctn_n++;

  return 0;

 set_end:
  ctf_next_destroy (i);
  *it = NULL;
  return ECTF_NEXT_END;
}
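
/* Illustrative sketch only: iterating over a dynset looks just like iterating
   over a dynhash, minus the value.

     ctf_next_t *i = NULL;
     void *k;
     int err;

     while ((err = ctf_dynset_next (s, &i, &k)) == 0)
       ... use k ...
     if (err != ECTF_NEXT_END)
       ... report err ...  */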

/* ctf_hash, used for fixed-size maps from const char * -> ctf_id_t without
   removal.  This is a straight cast of a hashtab.  */

ctf_hash_t *
ctf_hash_create (unsigned long nelems, ctf_hash_fun hash_fun,
                 ctf_hash_eq_fun eq_fun)
{
  return (ctf_hash_t *) htab_create_alloc (nelems, (htab_hash) hash_fun,
                                           eq_fun, free, xcalloc, free);
}
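
/* Illustrative sketch only: the fixed-size hash is keyed by strings but
   populated via strtab offsets, so insertion normally happens during type
   initialization with a real ctf_file_t in hand; lookups are then by name.
   The element count and type name are invented for the example.

     ctf_hash_t *h = ctf_hash_create (100, ctf_hash_string,
                                      ctf_hash_eq_string);
     ...
     ctf_id_t id = ctf_hash_lookup_type (h, fp, "int");
     ctf_hash_destroy (h);  */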

uint32_t
ctf_hash_size (const ctf_hash_t *hp)
{
  return htab_elements ((struct htab *) hp);
}

int
ctf_hash_insert_type (ctf_hash_t *hp, ctf_file_t *fp, uint32_t type,
                      uint32_t name)
{
  const char *str = ctf_strraw (fp, name);

  if (type == 0)
    return EINVAL;

  if (str == NULL
      && CTF_NAME_STID (name) == CTF_STRTAB_1
      && fp->ctf_syn_ext_strtab == NULL
      && fp->ctf_str[CTF_NAME_STID (name)].cts_strs == NULL)
    return ECTF_STRTAB;

  if (str == NULL)
    return ECTF_BADNAME;

  if (str[0] == '\0')
    return 0;       /* Just ignore empty strings on behalf of caller.  */

  if (ctf_hashtab_insert ((struct htab *) hp, (char *) str,
                          (void *) (ptrdiff_t) type, NULL, NULL) != NULL)
    return 0;
  return errno;
}

/* If the key is already in the hash, override the previous definition with
   this new official definition.  If the key is not present, then call
   ctf_hash_insert_type and hash it in.  */
int
ctf_hash_define_type (ctf_hash_t *hp, ctf_file_t *fp, uint32_t type,
                      uint32_t name)
{
  /* This matches the semantics of ctf_hash_insert_type in this
     implementation anyway.  */

  return ctf_hash_insert_type (hp, fp, type, name);
}

ctf_id_t
ctf_hash_lookup_type (ctf_hash_t *hp, ctf_file_t *fp __attribute__ ((__unused__)),
                      const char *key)
{
  ctf_helem_t **slot;

  slot = ctf_hashtab_lookup ((struct htab *) hp, key, NO_INSERT);

  if (slot)
    return (ctf_id_t) ((*slot)->value);

  return 0;
}

void
ctf_hash_destroy (ctf_hash_t *hp)
{
  if (hp != NULL)
    htab_delete ((struct htab *) hp);
}