Make plpython_unicode regression test work in more database encodings.
This test previously used a data value containing U+0080, and would therefore fail if the database encoding didn't have an equivalent to that; which only about half of our supported server encodings do. We could fall back to using some plain-ASCII character, but that seems like it's losing most of the point of the test. Instead switch to using U+00A0 (no-break space), which translates into all our supported encodings except the four in the EUC_xx family.

Per buildfarm testing. Back-patch to 9.1, which is as far back as this test is expected to succeed everywhere. (9.0 has the test, but without back-patching some 9.1 code changes we could not expect to get consistent results across platforms anyway.)
parent 44445b28d2
commit 2dfa15de55
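The encoding-coverage claim in the message is easy to spot-check from SQL: convert_to() raises an error when the source character has no equivalent in the destination encoding. A minimal sketch, assuming a database with UTF8 server encoding (E'\uXXXX' escapes only reach non-ASCII code points when the server encoding is UTF8):

-- U+00A0 has an equivalent in most server encodings, e.g. LATIN1 ...
SELECT convert_to(E'\u00A0', 'LATIN1');  -- ok: returns the one-byte value \xa0
-- ... but none in the EUC_xx family, so the conversion raises an error there
SELECT convert_to(E'\u00A0', 'EUC_JP');  -- error: no equivalent in EUC_JP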
@@ -1,22 +1,27 @@
 --
 -- Unicode handling
 --
+-- Note: this test case is known to fail if the database encoding is
+-- EUC_CN, EUC_JP, EUC_KR, or EUC_TW, for lack of any equivalent to
+-- U+00A0 (no-break space) in those encodings. However, testing with
+-- plain ASCII data would be rather useless, so we must live with that.
+--
 SET client_encoding TO UTF8;
 CREATE TABLE unicode_test (
     testvalue text NOT NULL
 );
 CREATE FUNCTION unicode_return() RETURNS text AS E'
-return u"\\x80"
+return u"\\xA0"
 ' LANGUAGE plpythonu;
 CREATE FUNCTION unicode_trigger() RETURNS trigger AS E'
-TD["new"]["testvalue"] = u"\\x80"
+TD["new"]["testvalue"] = u"\\xA0"
 return "MODIFY"
 ' LANGUAGE plpythonu;
 CREATE TRIGGER unicode_test_bi BEFORE INSERT ON unicode_test
   FOR EACH ROW EXECUTE PROCEDURE unicode_trigger();
 CREATE FUNCTION unicode_plan1() RETURNS text AS E'
 plan = plpy.prepare("SELECT $1 AS testvalue", ["text"])
-rv = plpy.execute(plan, [u"\\x80"], 1)
+rv = plpy.execute(plan, [u"\\xA0"], 1)
 return rv[0]["testvalue"]
 ' LANGUAGE plpythonu;
 CREATE FUNCTION unicode_plan2() RETURNS text AS E'
@@ -27,20 +32,20 @@ return rv[0]["testvalue"]
 SELECT unicode_return();
  unicode_return 
 ----------------
- \u0080
+  
 (1 row)

 INSERT INTO unicode_test (testvalue) VALUES ('test');
 SELECT * FROM unicode_test;
  testvalue 
 -----------
- \u0080
+  
 (1 row)

 SELECT unicode_plan1();
  unicode_plan1 
 ---------------
- \u0080
+  
 (1 row)

 SELECT unicode_plan2();
@@ -1,6 +1,11 @@
 --
 -- Unicode handling
 --
+-- Note: this test case is known to fail if the database encoding is
+-- EUC_CN, EUC_JP, EUC_KR, or EUC_TW, for lack of any equivalent to
+-- U+00A0 (no-break space) in those encodings. However, testing with
+-- plain ASCII data would be rather useless, so we must live with that.
+--

 SET client_encoding TO UTF8;

@@ -9,11 +14,11 @@ CREATE TABLE unicode_test (
 );

 CREATE FUNCTION unicode_return() RETURNS text AS E'
-return u"\\x80"
+return u"\\xA0"
 ' LANGUAGE plpythonu;

 CREATE FUNCTION unicode_trigger() RETURNS trigger AS E'
-TD["new"]["testvalue"] = u"\\x80"
+TD["new"]["testvalue"] = u"\\xA0"
 return "MODIFY"
 ' LANGUAGE plpythonu;

@@ -22,7 +27,7 @@ CREATE TRIGGER unicode_test_bi BEFORE INSERT ON unicode_test

 CREATE FUNCTION unicode_plan1() RETURNS text AS E'
 plan = plpy.prepare("SELECT $1 AS testvalue", ["text"])
-rv = plpy.execute(plan, [u"\\x80"], 1)
+rv = plpy.execute(plan, [u"\\xA0"], 1)
 return rv[0]["testvalue"]
 ' LANGUAGE plpythonu;
