path: root/khtml/html/htmltokenizer.cpp
author      Timothy Pearson <kb9vqf@pearsoncomputing.net>   2011-12-21 14:22:15 -0600
committer   Slávek Banko <slavek.banko@axis.cz>             2012-06-02 19:05:01 +0200
commit      ffd8d17b0ba4286d25112d69d0b04bbe50e31b26 (patch)
tree        047226d0f2817f5af3c55eb9d31a443fe7b9b61e /khtml/html/htmltokenizer.cpp
parent      41b1d53a0144afe4c31425c18af25c2d6ade881b (diff)
Rename obsolete tq methods to standard names
(cherry picked from commit 1180237ab336226ad932d767a6cb56208314988f)
Diffstat (limited to 'khtml/html/htmltokenizer.cpp')
-rw-r--r--  khtml/html/htmltokenizer.cpp  26
1 file changed, 13 insertions(+), 13 deletions(-)
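
The change is purely mechanical: every call to the obsolete tq-prefixed accessor TQChar::tqunicode() is renamed to the standard TQChar::unicode(); the tokenizer logic itself is untouched. A minimal sketch of the pattern, using a hypothetical helper that is not part of this patch (and assuming the TQt3 header tqstring.h provides TQChar):

    #include <tqstring.h>

    // Hypothetical illustration of the rename applied throughout this diff:
    // read a character's code point with the standard accessor.
    static bool isTagOpen(const TQChar &c)
    {
        // before the patch: c.tqunicode() == '<'
        // after the patch:  c.unicode()   == '<'
        return c.unicode() == '<';
    }
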
diff --git a/khtml/html/htmltokenizer.cpp b/khtml/html/htmltokenizer.cpp
index 778b9fe3a..5da6edd14 100644
--- a/khtml/html/htmltokenizer.cpp
+++ b/khtml/html/htmltokenizer.cpp
@@ -89,7 +89,7 @@ static const char titleEnd [] = "</title";
#define fixUpChar(x)
#else
#define fixUpChar(x) \
- switch ((x).tqunicode()) \
+ switch ((x).unicode()) \
{ \
case 0x80: (x) = 0x20ac; break; \
case 0x82: (x) = 0x201a; break; \
@@ -471,7 +471,7 @@ void HTMLTokenizer::parseComment(TokenizerString &src)
if (strict)
{
- if (src->tqunicode() == '-') {
+ if (src->unicode() == '-') {
delimiterCount++;
if (delimiterCount == 2) {
delimiterCount = 0;
@@ -482,7 +482,7 @@ void HTMLTokenizer::parseComment(TokenizerString &src)
delimiterCount = 0;
}
- if ((!strict || canClose) && src->tqunicode() == '>')
+ if ((!strict || canClose) && src->unicode() == '>')
{
bool handleBrokenComments = brokenComments && !( script || style );
bool scriptEnd=false;
@@ -521,7 +521,7 @@ void HTMLTokenizer::parseServer(TokenizerString &src)
checkScriptBuffer(src.length());
while ( !src.isEmpty() ) {
scriptCode[ scriptCodeSize++ ] = *src;
- if (src->tqunicode() == '>' &&
+ if (src->unicode() == '>' &&
scriptCodeSize > 1 && scriptCode[scriptCodeSize-2] == '%') {
++src;
server = false;
@@ -607,7 +607,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
while( !src.isEmpty() )
{
- ushort cc = src->tqunicode();
+ ushort cc = src->unicode();
switch(Entity) {
case NoEntity:
return;
@@ -639,7 +639,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
case Hexadecimal:
{
- int uc = EntityChar.tqunicode();
+ int uc = EntityChar.unicode();
int ll = kMin<uint>(src.length(), 8);
while(ll--) {
TQChar csrc(src->lower());
@@ -658,7 +658,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
}
case Decimal:
{
- int uc = EntityChar.tqunicode();
+ int uc = EntityChar.unicode();
int ll = kMin(src.length(), 9-cBufferPos);
while(ll--) {
cc = src->cell();
@@ -718,7 +718,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
}
case SearchSemicolon:
#ifdef TOKEN_DEBUG
- kdDebug( 6036 ) << "ENTITY " << EntityChar.tqunicode() << endl;
+ kdDebug( 6036 ) << "ENTITY " << EntityChar.unicode() << endl;
#endif
fixUpChar(EntityChar);
@@ -956,7 +956,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
ushort curchar;
bool atespace = false;
while(!src.isEmpty()) {
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar > ' ') {
if(curchar == '=') {
#ifdef TOKEN_DEBUG
@@ -988,7 +988,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
{
ushort curchar;
while(!src.isEmpty()) {
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar > ' ') {
if(( curchar == '\'' || curchar == '\"' )) {
tquote = curchar == '\"' ? DoubleQuote : SingleQuote;
@@ -1012,7 +1012,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
while(!src.isEmpty()) {
checkBuffer();
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar <= '\'' && !src.escaped()) {
// ### attributes like '&{blaa....};' are supposed to be treated as jscript.
if ( curchar == '&' )
@@ -1050,7 +1050,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
ushort curchar;
while(!src.isEmpty()) {
checkBuffer();
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar <= '>' && !src.escaped()) {
// parse Entities
if ( curchar == '&' )
@@ -1351,7 +1351,7 @@ void HTMLTokenizer::write( const TokenizerString &str, bool appendData )
// do we need to enlarge the buffer?
checkBuffer();
- ushort cc = src->tqunicode();
+ ushort cc = src->unicode();
if (skipLF && (cc != '\n'))
skipLF = false;