Mirror of https://github.com/netwide-assembler/nasm.git
Change the token prehash function for better convergence
Combining arithmetic (add) and bitwise (xor) mixing seems to give better results than either alone. With the new prehash function, we find a valid hash much more quickly.
parent 3ce3715fba
commit 5255fd1f36
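As a rough sketch (not part of the commit), the per-character mixing round of the generated hash changes as follows in C. The rot() helper and the rotation counts 13, 27, 7 and 21 are stand-ins: the real counts are the ${$sv}[0..3] values chosen by the hash search, and rot is assumed to be a 32-bit left rotate.

#include <stdint.h>

/* Assumed 32-bit left rotate, standing in for the generator's rot() */
static uint32_t rot(uint32_t x, int n)
{
    return (x << n) | (x >> (32 - n));
}

/* One prehash round over input byte c; rotation counts are illustrative. */
static void mix(uint32_t *k1, uint32_t *k2, uint32_t c)
{
    /* before: purely arithmetic mixing
     *   kn1 = rot(*k1,13) - rot(*k2,27) + c;
     *   kn2 = rot(*k2, 7) - rot(*k1,21) + c;
     * after: bitwise (xor) combined with arithmetic (add) mixing */
    uint32_t kn1 = rot(*k1,13) ^ (rot(*k2,27) + c);
    uint32_t kn2 = rot(*k2, 7) ^ (rot(*k1,21) + c);
    *k1 = kn1;
    *k2 = kn2;
}

The subtraction-only round mixes the two state words purely arithmetically; replacing one operation with xor alternates carry-propagating and carry-free mixing, which is the combination the commit message credits for the faster convergence.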
@@ -42,8 +42,8 @@ sub prehash($$$) {
 
     foreach $c (unpack("C*", $key)) {
         $ko1 = $k1; $ko2 = $k2;
-        $k1 = int32(rot($ko1,$s0)-rot($ko2, $s1)+$c);
-        $k2 = int32(rot($ko2,$s2)-rot($ko1, $s3)+$c);
+        $k1 = int32(rot($ko1,$s0)^int32(rot($ko2, $s1)+$c));
+        $k2 = int32(rot($ko2,$s2)^int32(rot($ko1, $s3)+$c));
     }
 
     # Create a bipartite graph...
pptok.pl (4 changed lines)
@@ -191,8 +191,8 @@ if ($what eq 'c') {
     print OUT " while ((c = *p++) != 0) {\n";
     print OUT " uint32_t kn1, kn2;\n";
     print OUT " c |= 0x20; /* convert to lower case */\n";
-    printf OUT " kn1 = rot(k1,%2d) - rot(k2,%2d) + c;\n", ${$sv}[0], ${$sv}[1];
-    printf OUT " kn2 = rot(k2,%2d) - rot(k1,%2d) + c;\n", ${$sv}[2], ${$sv}[3];
+    printf OUT " kn1 = rot(k1,%2d)^(rot(k2,%2d) + c);\n", ${$sv}[0], ${$sv}[1];
+    printf OUT " kn2 = rot(k2,%2d)^(rot(k1,%2d) + c);\n", ${$sv}[2], ${$sv}[3];
     print OUT " k1 = kn1; k2 = kn2;\n";
     print OUT " }\n";
     print OUT "\n";
@@ -187,8 +187,8 @@ print " const char *p = token;\n";
 print "\n";
 
 print " while ((c = *p++) != 0) {\n";
-printf " uint32_t kn1 = rot(k1,%2d) - rot(k2,%2d) + c;\n", ${$sv}[0], ${$sv}[1];
-printf " uint32_t kn2 = rot(k2,%2d) - rot(k1,%2d) + c;\n", ${$sv}[2], ${$sv}[3];
+printf " uint32_t kn1 = rot(k1,%2d)^(rot(k2,%2d) + c);\n", ${$sv}[0], ${$sv}[1];
+printf " uint32_t kn2 = rot(k2,%2d)^(rot(k1,%2d) + c);\n", ${$sv}[2], ${$sv}[3];
 print " k1 = kn1; k2 = kn2;\n";
 print " }\n";
 print "\n";
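For completeness, here is a sketch of the whole prehash loop as pptok.pl would emit it after this change, including the lower-case fold. The function name prehash_token is hypothetical, and the rotation counts again stand in for the generated ${$sv} values.

#include <stdint.h>

static uint32_t rot(uint32_t x, int n)
{
    return (x << n) | (x >> (32 - n));
}

/* Illustrative only: mirrors the shape of the generated prehash loop. */
static void prehash_token(const char *token, uint32_t *k1p, uint32_t *k2p)
{
    uint32_t k1 = *k1p, k2 = *k2p, c;
    const unsigned char *p = (const unsigned char *)token;

    while ((c = *p++) != 0) {
        uint32_t kn1, kn2;
        c |= 0x20;              /* convert to lower case */
        kn1 = rot(k1,13) ^ (rot(k2,27) + c);
        kn2 = rot(k2, 7) ^ (rot(k1,21) + c);
        k1 = kn1; k2 = kn2;
    }
    *k1p = k1; *k2p = k2;
}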