path: root/khtml/html/htmltokenizer.cpp
author    tpearson <tpearson@283d02a7-25f6-0310-bc7c-ecb5cbfe19da>  2011-01-07 03:45:53 +0000
committer tpearson <tpearson@283d02a7-25f6-0310-bc7c-ecb5cbfe19da>  2011-01-07 03:45:53 +0000
commit    10308be19ef7fa44699562cc75946e7ea1fdf6b9 (patch)
tree      4bc444c00a79e88105f2cfce5b6209994c413ca0  /khtml/html/htmltokenizer.cpp
parent    307136d8eef0ba133b78ceee8e901138d4c996a1 (diff)
download  tdelibs-10308be19ef7fa44699562cc75946e7ea1fdf6b9.tar.gz
          tdelibs-10308be19ef7fa44699562cc75946e7ea1fdf6b9.zip
Revert automated changes
Sorry guys, they are just not ready for prime time. Work will continue as always.
git-svn-id: svn://anonsvn.kde.org/home/kde/branches/trinity/kdelibs@1212479 283d02a7-25f6-0310-bc7c-ecb5cbfe19da
Diffstat (limited to 'khtml/html/htmltokenizer.cpp')
-rw-r--r--  khtml/html/htmltokenizer.cpp  |  42
1 file changed, 21 insertions, 21 deletions
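Every hunk below is the same mechanical change being rolled back: the automated conversion had prefixed Qt-style method names with "tq" (tqunicode(), tqfind(), tqfromLatin1(), tqcontains()), and this revert restores the original spellings. A minimal sketch of the pattern, using a hypothetical stand-in type rather than the real TQChar from tdelibs:

#include <iostream>

// Hypothetical stand-in type, only to illustrate the rename direction;
// it is not the real TQChar.
struct Char {
    unsigned short u;
    unsigned short unicode() const { return u; }     // spelling restored by this revert
    // unsigned short tqunicode() const { return u; } // spelling produced by the automated rename
};

int main() {
    Char c{0x20ac};                        // U+20AC (Euro sign)
    std::cout << std::hex << c.unicode()   // call sites change back from c.tqunicode()
              << std::endl;
    return 0;
}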
diff --git a/khtml/html/htmltokenizer.cpp b/khtml/html/htmltokenizer.cpp
index ba8e99a50..5da6edd14 100644
--- a/khtml/html/htmltokenizer.cpp
+++ b/khtml/html/htmltokenizer.cpp
@@ -89,7 +89,7 @@ static const char titleEnd [] = "</title";
#define fixUpChar(x)
#else
#define fixUpChar(x) \
- switch ((x).tqunicode()) \
+ switch ((x).unicode()) \
{ \
case 0x80: (x) = 0x20ac; break; \
case 0x82: (x) = 0x201a; break; \
@@ -347,7 +347,7 @@ void HTMLTokenizer::parseSpecial(TokenizerString &src)
// possible end of tagname, lets check.
if ( !scriptCodeResync && !escaped && !src.escaped() && ( ch == '>' || ch == '/' || ch <= ' ' ) && ch &&
scriptCodeSize >= searchStopperLen &&
- !TQConstString( scriptCode+scriptCodeSize-searchStopperLen, searchStopperLen ).string().tqfind( searchStopper, 0, false )) {
+ !TQConstString( scriptCode+scriptCodeSize-searchStopperLen, searchStopperLen ).string().find( searchStopper, 0, false )) {
scriptCodeResync = scriptCodeSize-searchStopperLen+1;
tquote = NoQuote;
continue;
@@ -471,7 +471,7 @@ void HTMLTokenizer::parseComment(TokenizerString &src)
if (strict)
{
- if (src->tqunicode() == '-') {
+ if (src->unicode() == '-') {
delimiterCount++;
if (delimiterCount == 2) {
delimiterCount = 0;
@@ -482,7 +482,7 @@ void HTMLTokenizer::parseComment(TokenizerString &src)
delimiterCount = 0;
}
- if ((!strict || canClose) && src->tqunicode() == '>')
+ if ((!strict || canClose) && src->unicode() == '>')
{
bool handleBrokenComments = brokenComments && !( script || style );
bool scriptEnd=false;
@@ -521,7 +521,7 @@ void HTMLTokenizer::parseServer(TokenizerString &src)
checkScriptBuffer(src.length());
while ( !src.isEmpty() ) {
scriptCode[ scriptCodeSize++ ] = *src;
- if (src->tqunicode() == '>' &&
+ if (src->unicode() == '>' &&
scriptCodeSize > 1 && scriptCode[scriptCodeSize-2] == '%') {
++src;
server = false;
@@ -607,7 +607,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
while( !src.isEmpty() )
{
- ushort cc = src->tqunicode();
+ ushort cc = src->unicode();
switch(Entity) {
case NoEntity:
return;
@@ -639,7 +639,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
case Hexadecimal:
{
- int uc = EntityChar.tqunicode();
+ int uc = EntityChar.unicode();
int ll = kMin<uint>(src.length(), 8);
while(ll--) {
TQChar csrc(src->lower());
@@ -658,7 +658,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
}
case Decimal:
{
- int uc = EntityChar.tqunicode();
+ int uc = EntityChar.unicode();
int ll = kMin(src.length(), 9-cBufferPos);
while(ll--) {
cc = src->cell();
@@ -695,7 +695,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
// be IE compatible and interpret even unterminated entities
// outside tags. like "foo &nbspstuff bla".
if ( tag == NoTag ) {
- const entity* e = kde_tqfindEntity(cBuffer, cBufferPos);
+ const entity* e = kde_findEntity(cBuffer, cBufferPos);
if ( e && e->code < 256 ) {
EntityChar = e->code;
entityLen = cBufferPos;
@@ -705,7 +705,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
if(cBufferPos == 9) Entity = SearchSemicolon;
if(Entity == SearchSemicolon) {
if(cBufferPos > 1) {
- const entity *e = kde_tqfindEntity(cBuffer, cBufferPos);
+ const entity *e = kde_findEntity(cBuffer, cBufferPos);
// IE only accepts unterminated entities < 256,
// Gecko accepts them all, but only outside tags
if(e && ( tag == NoTag || e->code < 256 || *src == ';' )) {
@@ -718,7 +718,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
}
case SearchSemicolon:
#ifdef TOKEN_DEBUG
- kdDebug( 6036 ) << "ENTITY " << EntityChar.tqunicode() << endl;
+ kdDebug( 6036 ) << "ENTITY " << EntityChar.unicode() << endl;
#endif
fixUpChar(EntityChar);
@@ -813,7 +813,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
break;
}
// this is a nasty performance trick. will work for the A-Z
- // characters, but not for others. if it tqcontains one,
+ // characters, but not for others. if it contains one,
// we fail anyway
char cc = curchar;
cBuffer[cBufferPos++] = cc | 0x20;
@@ -919,7 +919,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
a = khtml::getAttrID(cBuffer, cBufferPos-1);
}
if (!a)
- attrName = TQString::tqfromLatin1(TQCString(cBuffer, cBufferPos+1).data());
+ attrName = TQString::fromLatin1(TQCString(cBuffer, cBufferPos+1).data());
}
dest = buffer;
@@ -941,7 +941,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
}
if ( cBufferPos == CBUFLEN ) {
cBuffer[cBufferPos] = '\0';
- attrName = TQString::tqfromLatin1(TQCString(cBuffer, cBufferPos+1).data());
+ attrName = TQString::fromLatin1(TQCString(cBuffer, cBufferPos+1).data());
dest = buffer;
*dest++ = 0;
tag = SearchEqual;
@@ -956,7 +956,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
ushort curchar;
bool atespace = false;
while(!src.isEmpty()) {
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar > ' ') {
if(curchar == '=') {
#ifdef TOKEN_DEBUG
@@ -988,7 +988,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
{
ushort curchar;
while(!src.isEmpty()) {
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar > ' ') {
if(( curchar == '\'' || curchar == '\"' )) {
tquote = curchar == '\"' ? DoubleQuote : SingleQuote;
@@ -1012,7 +1012,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
while(!src.isEmpty()) {
checkBuffer();
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar <= '\'' && !src.escaped()) {
// ### attributes like '&{blaa....};' are supposed to be treated as jscript.
if ( curchar == '&' )
@@ -1050,7 +1050,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
ushort curchar;
while(!src.isEmpty()) {
checkBuffer();
- curchar = src->tqunicode();
+ curchar = src->unicode();
if(curchar <= '>' && !src.escaped()) {
// parse Entities
if ( curchar == '&' )
@@ -1351,7 +1351,7 @@ void HTMLTokenizer::write( const TokenizerString &str, bool appendData )
// do we need to enlarge the buffer?
checkBuffer();
- ushort cc = src->tqunicode();
+ ushort cc = src->unicode();
if (skipLF && (cc != '\n'))
skipLF = false;
@@ -1595,7 +1595,7 @@ void HTMLTokenizer::finish()
killTimer( m_autoCloseTimer );
m_autoCloseTimer = 0;
}
- // do this as long as we don't tqfind matching comment ends
+ // do this as long as we don't find matching comment ends
while((title || script || comment || server) && scriptCode && scriptCodeSize)
{
// we've found an unmatched comment start
@@ -1618,7 +1618,7 @@ void HTMLTokenizer::finish()
food += TQString(scriptCode, scriptCodeSize);
}
else {
- pos = TQConstString(scriptCode, scriptCodeSize).string().tqfind('>');
+ pos = TQConstString(scriptCode, scriptCodeSize).string().find('>');
food.setUnicode(scriptCode+pos+1, scriptCodeSize-pos-1); // deep copy
}
KHTML_DELETE_QCHAR_VEC(scriptCode);
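Aside from the renames, the first hunk shows part of the fixUpChar macro, which remaps code points 0x80-0x9F: these are C1 control characters in Unicode, but in web content they usually mean the printable Windows-1252 characters, so the tokenizer substitutes those (0x80 becomes U+20AC, 0x82 becomes U+201A, and so on). A rough standalone illustration of that remapping, covering only the two table entries visible in the hunk rather than the macro's full range:

#include <cstdint>
#include <iostream>

// Standalone sketch of the kind of remapping fixUpChar performs; only the two
// entries shown in this diff are reproduced, everything else passes through.
static uint16_t fix_up_char(uint16_t c) {
    switch (c) {
    case 0x80: return 0x20ac;  // Euro sign
    case 0x82: return 0x201a;  // single low-9 quotation mark
    default:   return c;
    }
}

int main() {
    std::cout << std::hex
              << fix_up_char(0x80) << ' '   // prints 20ac
              << fix_up_char(0x82) << '\n'; // prints 201a
    return 0;
}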