/* eslint-disable @typescript-eslint/no-explicit-any */
// will be reworked in MTQ-32

import Long from 'long'

import { mtp, tl } from '@mtcute/tl'

import {
    TlBinaryReader,
    TlBinaryWriter,
    TlReaderMap,
    TlSerializationCounter,
    TlWriterMap,
} from '@mtcute/tl-runtime'
import { gzipDeflate, gzipInflate } from '@mtcute/tl-runtime/src/platform/gzip'

import {
    ControllablePromise,
    createCancellablePromise,
    createControllablePromise,
    EarlyTimer,
    longFromBuffer,
    randomBytes,
    randomLong,
    removeFromLongArray,
} from '../utils'
import { createAesIgeForMessageOld } from '../utils/crypto/mtproto'
import { doAuthorization } from './authorization'
import { MtprotoSession, PendingMessage, PendingRpc } from './mtproto-session'
import {
    PersistentConnection,
    PersistentConnectionParams,
} from './persistent-connection'
import { TransportError } from './transports'

const TEMP_AUTH_KEY_EXPIRY = 86400

export interface SessionConnectionParams extends PersistentConnectionParams {
    initConnection: tl.RawInitConnectionRequest
    inactivityTimeout?: number
    niceStacks?: boolean
    layer: number
    disableUpdates?: boolean
    isMainConnection: boolean
    usePfs?: boolean

    readerMap: TlReaderMap
    writerMap: TlWriterMap
}

// destroy_auth_key#d1435160 = DestroyAuthKeyRes;
// const DESTROY_AUTH_KEY = Buffer.from('605134d1', 'hex')

function makeNiceStack(
    error: tl.errors.RpcError,
    stack: string,
    method?: string,
) {
    error.stack = `${error.constructor.name} (${error.code} ${error.text}): ${
        error.message
    }\n    at ${method}\n${stack.split('\n').slice(2).join('\n')}`
}
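
// For illustration, a rejected RPC surfaces with a stack shaped by the
// template above (hypothetical error values):
//
//   RpcError (420 FLOOD_WAIT_X): Wait of 42 seconds is required
//       at messages.sendMessage
//       at <call-site frames captured when sendRpc() was invoked>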

/**
 * A connection to a single DC.
 */
export class SessionConnection extends PersistentConnection {
    readonly params!: SessionConnectionParams

    private _flushTimer = new EarlyTimer()
    private _queuedDestroySession: Long[] = []

    // waitForMessage
    private _pendingWaitForUnencrypted: [
        ControllablePromise<Buffer>,
        NodeJS.Timeout
    ][] = []

    private _next429Timeout = 1000
    private _current429Timeout?: NodeJS.Timeout

    private _lastPingRtt = NaN
    private _lastPingTime = 0
    private _lastPingMsgId = Long.ZERO
    private _lastSessionCreatedUid = Long.ZERO

    private _usePfs = this.params.usePfs ?? false
    private _isPfsBindingPending = false
    private _isPfsBindingPendingInBackground = false
    private _pfsUpdateTimeout?: NodeJS.Timeout

    private _readerMap: TlReaderMap
    private _writerMap: TlWriterMap

    constructor(
        params: SessionConnectionParams,
        readonly _session: MtprotoSession,
    ) {
        super(params, _session.log.create('conn'))
        this._flushTimer.onTimeout(this._flush.bind(this))

        this._readerMap = params.readerMap
        this._writerMap = params.writerMap
        this._handleRawMessage = this._handleRawMessage.bind(this)
    }

    getAuthKey(temp = false): Buffer | null {
        const key = temp ? this._session._authKeyTemp : this._session._authKey

        if (!key.ready) return null

        return key.key
    }

    setUsePfs(usePfs: boolean): void {
        if (this._usePfs === usePfs) return

        this.log.debug('use pfs changed to %s', usePfs)
        this._usePfs = usePfs

        if (!usePfs) {
            this._isPfsBindingPending = false
            this._isPfsBindingPendingInBackground = false
            this._session._authKeyTemp.reset()
            clearTimeout(this._pfsUpdateTimeout!)
        }

        this._resetSession()
    }
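
    // Usage sketch (hypothetical caller, e.g. a client toggling PFS at
    // runtime; not part of this file):
    //
    //   conn.setUsePfs(true)  // resets the session; temp-key binding starts
    //                         // on the next connect via _authorizePfs()
    //   conn.setUsePfs(false) // drops the temp key, reverts to the perm key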

    onTransportClose(): void {
        super.onTransportClose()

        Object.values(this._pendingWaitForUnencrypted).forEach(
            ([prom, timeout]) => {
                prom.reject(new Error('Connection closed'))
                clearTimeout(timeout)
            },
        )

        this.emit('disconnect')

        this.reset()
    }

    destroy(): void {
        super.destroy()
        this.reset(true)
    }

    reset(forever = false): void {
        this._session.initConnectionCalled = false
        this._resetLastPing(true)
        this._flushTimer.reset()
        clearTimeout(this._current429Timeout!)

        if (forever) {
            this.removeAllListeners()
        }
    }

    onConnected(): void {
        // check if we have all the needed keys
        if (!this._session._authKey.ready) {
            if (!this.params.isMainConnection) {
                this.log.info('no auth key, waiting for main connection')

                // once it is done, we will be notified
                return
            }

            this.log.info('no perm auth key, authorizing...')
            this._authorize()

            // if we use pfs, we *could* also start temp key exchange here
            // but telegram restricts us to only have one auth session per connection,
            // and having a separate connection for pfs is not worth it
            return
        }

        if (this._usePfs && !this._session._authKeyTemp.ready) {
            this.log.info('no temp auth key but using pfs, authorizing')
            this._authorizePfs()

            return
        }

        this.log.info('auth keys are already available')
        this.onConnectionUsable()
    }

    protected onError(error: Error): void {
        // https://core.telegram.org/mtproto/mtproto-transports#transport-errors
        if (error instanceof TransportError) {
            if (error.code === 404) {
                // if we are using pfs, this could be due to the server
                // forgetting our temp key (which is kinda weird but expected)

                if (this._usePfs) {
                    if (
                        !this._isPfsBindingPending &&
                        this._session._authKeyTemp.ready
                    ) {
                        this.log.info('transport error 404, reauthorizing pfs')

                        // this is important! we must reset temp auth key before
                        // we proceed with new temp key derivation.
                        // otherwise, we can end up in an infinite loop in case it
                        // was actually perm_key that got 404-ed
                        //
                        // if temp key binding is already in process in background,
                        // _authorizePfs will mark it as foreground to prevent new
                        // queries from being sent (to avoid even more 404s)
                        this._session._authKeyTemp.reset()
                        this._authorizePfs()
                        this._onAllFailed('temp key expired, binding started')

                        return
                    } else if (this._isPfsBindingPending) {
                        this.log.info(
                            'transport error 404, pfs binding in progress',
                        )

                        this._onAllFailed('temp key expired, binding pending')

                        return
                    }

                    // otherwise, 404 must be referencing the perm_key
                    this.log.info('transport error 404, reauthorizing')
                }

                // there happened a little trolling
                this._session.reset(true)
                this.emit('key-change', null)
                this._authorize()

                return
            }

            this.log.error('transport error %d', error.code)
            // all pending queries must be resent
            this._onAllFailed(`transport error ${error.code}`)

            if (error.code === 429) {
                // all active queries must be resent
                const timeout = this._next429Timeout

                this._next429Timeout = Math.min(this._next429Timeout * 2, 16000)
                clearTimeout(this._current429Timeout!)
                this._current429Timeout = setTimeout(() => {
                    this._current429Timeout = undefined
                    this._flushTimer.emitNow()
                }, timeout)

                this.log.debug(
                    'transport flood, waiting for %d ms before proceeding',
                    timeout,
                )

                return
            }
        }

        this.emit('error', error)
    }

    protected onConnectionUsable() {
        super.onConnectionUsable()

        // just in case
        this._flushTimer.emitBeforeNext(1000)
    }

    _authorize(): void {
        if (this._session.authorizationPending) {
            this.log.info('_authorize(): authorization already in progress')

            return
        }

        if (!this.params.isMainConnection) {
            // we don't authorize on non-main connections
            this.log.debug('_authorize(): non-main connection, requesting...')
            this.emit('request-auth')

            return
        }

        this._session.authorizationPending = true
        this.emit('auth-begin')

        doAuthorization(this, this.params.crypto)
            .then(async ([authKey, serverSalt, timeOffset]) => {
                await this._session._authKey.setup(authKey)
                this._session.serverSalt = serverSalt
                this._session._timeOffset = timeOffset

                this._session.authorizationPending = false

                this.emit('key-change', authKey)

                if (this._usePfs) {
                    return this._authorizePfs()
                }
                this.onConnectionUsable()
            })
            .catch((err) => {
                this._session.authorizationPending = false
                this.log.error('Authorization error: %s', err.message)
                this.onError(err)
                this.reconnect()
            })
    }

    private _authorizePfs(background = false): void {
        if (this._isPfsBindingPending) return

        if (this._pfsUpdateTimeout) {
            clearTimeout(this._pfsUpdateTimeout)
            this._pfsUpdateTimeout = undefined
        }

        if (this._isPfsBindingPendingInBackground) {
            // e.g. temp key has expired while we were binding a key in the background
            // in this case, we shouldn't start pfs binding again, and instead wait for
            // current operation to complete
            this._isPfsBindingPendingInBackground = false
            this._isPfsBindingPending = true

            return
        }

        if (background) {
            this._isPfsBindingPendingInBackground = true
        } else {
            this._isPfsBindingPending = true
        }

        doAuthorization(this, this.params.crypto, TEMP_AUTH_KEY_EXPIRY)
            .then(async ([tempAuthKey, tempServerSalt]) => {
                if (!this._usePfs) {
                    this.log.info(
                        'pfs has been disabled while generating temp key',
                    )

                    return
                }

                const tempKey = await this._session._authKeyTempSecondary
                await tempKey.setup(tempAuthKey)

                const msgId = this._session.getMessageId()

                this.log.debug(
                    'binding temp_auth_key (%h) to perm_auth_key (%h), msg_id = %l...',
                    tempKey.id,
                    this._session._authKey.id,
                    msgId,
                )

                // we now need to bind the key
                const inner: mtp.RawMt_bind_auth_key_inner = {
                    _: 'mt_bind_auth_key_inner',
                    nonce: randomLong(),
                    tempAuthKeyId: longFromBuffer(tempKey.id),
                    permAuthKeyId: longFromBuffer(this._session._authKey.id),
                    tempSessionId: this._session._sessionId,
                    expiresAt:
                        Math.floor(Date.now() / 1000) + TEMP_AUTH_KEY_EXPIRY,
                }

                // encrypt using mtproto v1 (fucking kill me plz)

                const writer = TlBinaryWriter.alloc(this.params.writerMap, 80)
                // = 40 (inner length) + 32 (mtproto header) + 8 (pad 72 so mod 16 = 0)
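
                // For reference, the plaintext assembled below
                // (MTProto v1 layout, sizes in bytes):
                //   16  random bytes standing in for salt + session_id
                //    8  msg_id
                //    4  seq_no (0)
                //    4  msg_len (40)
                //   40  mt_bind_auth_key_inner
                //    8  random padding (appended after msg_key is computed)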

                writer.raw(randomBytes(16))
                writer.long(msgId)
                writer.int(0) // seq_no
                writer.int(40) // msg_len
                writer.object(inner)

                const msgWithoutPadding = writer.result()
                writer.raw(randomBytes(8))
                const msgWithPadding = writer.result()

                const hash = await this.params.crypto.sha1(msgWithoutPadding)
                const msgKey = hash.slice(4, 20)

                const ige = await createAesIgeForMessageOld(
                    this.params.crypto,
                    this._session._authKey.key,
                    msgKey,
                    true,
                )
                const encryptedData = await ige.encrypt(msgWithPadding)
                const encryptedMessage = Buffer.concat([
                    this._session._authKey.id,
                    msgKey,
                    encryptedData,
                ])

                const promise = createControllablePromise<
                    mtp.RawMt_rpc_error | boolean
                >()

                // encrypt the message using temp key and same msg id
                // this is a bit of a hack, but it works
                //
                // hacking inside main send loop to allow sending
                // with another key is just too much hassle.
                // we could just always use temp key if one is available,
                // but that way we won't be able to refresh the key
                // that is about to expire in the background without
                // interrupting actual message flow
                // decrypting is trivial though, since key id
                // is in the first bytes of the message, and is never used later on.

                this._session.pendingMessages.set(msgId, {
                    _: 'bind',
                    promise,
                })

                const request: tl.auth.RawBindTempAuthKeyRequest = {
                    _: 'auth.bindTempAuthKey',
                    permAuthKeyId: inner.permAuthKeyId,
                    nonce: inner.nonce,
                    expiresAt: inner.expiresAt,
                    encryptedMessage,
                }
                const reqSize = TlSerializationCounter.countNeededBytes(
                    this._writerMap,
                    request,
                )
                const reqWriter = TlBinaryWriter.alloc(
                    this._writerMap,
                    reqSize + 16,
                )
                reqWriter.long(this._registerOutgoingMsgId(msgId))
                reqWriter.uint(this._session.getSeqNo())
                reqWriter.uint(reqSize)
                reqWriter.object(request)

                // we can now send it as is
                const requestEncrypted = await tempKey.encryptMessage(
                    reqWriter.result(),
                    tempServerSalt,
                    this._session._sessionId,
                )
                await this.send(requestEncrypted)

                const res = await promise

                this._session.pendingMessages.delete(msgId)

                if (!this._usePfs) {
                    this.log.info(
                        'pfs has been disabled while binding temp key',
                    )

                    return
                }

                if (typeof res === 'object') {
                    this.log.error(
                        'failed to bind temp key: %s:%s',
                        res.errorCode,
                        res.errorMessage,
                    )
                    throw new Error('Failed to bind temporary key')
                }

                // now we can swap the keys (secondary becomes primary,
                // and primary is not immediately forgotten, because messages
                // using it may still be in flight)

                this._session._authKeyTempSecondary = this._session._authKeyTemp
                this._session._authKeyTemp = tempKey
                this._session.serverSalt = tempServerSalt

                this.log.debug(
                    'temp key has been bound, exp = %d',
                    inner.expiresAt,
                )

                this._isPfsBindingPending = false
                this._isPfsBindingPendingInBackground = false

                // we must re-init connection after binding temp key
                this._session.initConnectionCalled = false

                this.emit('tmp-key-change', tempAuthKey, inner.expiresAt)
                this.onConnectionUsable()

                // set a timeout to update temp auth key in advance to avoid interruption
                this._pfsUpdateTimeout = setTimeout(() => {
                    this._pfsUpdateTimeout = undefined
                    this.log.debug('temp key is expiring soon')
                    this._authorizePfs(true)
                }, (TEMP_AUTH_KEY_EXPIRY - 60) * 1000)
            })
            .catch((err) => {
                this.log.error('PFS Authorization error: %s', err.message)

                if (this._isPfsBindingPendingInBackground) {
                    this._isPfsBindingPendingInBackground = false

                    // if we are in background, we can just retry
                    return this._authorizePfs(true)
                }

                this._isPfsBindingPending = false
                this.onError(err)
                this.reconnect()
            })
    }
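
    // In short, the binding flow implemented above is:
    //   1. run the DH exchange again, with an expiry, to obtain a temp key
    //   2. serialize mt_bind_auth_key_inner and encrypt it with the *perm*
    //      key using the old (v1) scheme
    //   3. wrap the result in auth.bindTempAuthKey and send it encrypted
    //      with the *temp* key, reusing the same msg_id
    //   4. on success, promote the temp key and schedule a background
    //      refresh 60 seconds before it expires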

    waitForUnencryptedMessage(timeout = 5000): Promise<Buffer> {
        const promise = createControllablePromise<Buffer>()
        const timeoutId = setTimeout(() => {
            promise.reject(new Error('Timeout'))
            this._pendingWaitForUnencrypted =
                this._pendingWaitForUnencrypted.filter(
                    (it) => it[0] !== promise,
                )
        }, timeout)
        this._pendingWaitForUnencrypted.push([promise, timeoutId])

        return promise
    }
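
    // Usage sketch: this is how the plaintext DH exchange in
    // doAuthorization() consumes server replies (hypothetical call site):
    //
    //   await conn.send(plaintextPayload)
    //   const answer = await conn.waitForUnencryptedMessage(5000)
    //   // rejects with Error('Timeout') if nothing arrives within 5s,
    //   // and with Error('Connection closed') on transport close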

    protected async onMessage(data: Buffer): Promise<void> {
        if (data.readInt32LE(0) === 0 && data.readInt32LE(4) === 0) {
            // auth_key_id = 0, meaning it's an unencrypted message used for authorization

            if (this._pendingWaitForUnencrypted.length) {
                const [promise, timeout] =
                    this._pendingWaitForUnencrypted.shift()!
                clearTimeout(timeout)
                promise.resolve(data)
            } else {
                this.log.debug(
                    'unencrypted message received, but no one is waiting for it',
                )
            }

            return
        }

        if (!this._session._authKey.ready) {
            // if a message is received before authorization,
            // either the server is misbehaving,
            // or there was a problem with authorization.
            this.log.warn('received message before authorization: %h', data)

            return
        }

        try {
            await this._session.decryptMessage(data, this._handleRawMessage)
        } catch (err) {
            this.log.error('failed to decrypt message: %s\ndata: %h', err, data)
        }
    }

    private _handleRawMessage(
        messageId: Long,
        seqNo: number,
        message: TlBinaryReader,
    ): void {
        if (message.peekUint() === 0x3072cfa1) {
            // gzip_packed
            // we can't use message.gzip() because it may contain msg_container,
            // so we parse it manually.
            message.uint()

            return this._handleRawMessage(
                messageId,
                seqNo,
                new TlBinaryReader(
                    this._readerMap,
                    gzipInflate(message.bytes()),
                ),
            )
        }

        if (message.peekUint() === 0x73f1f8dc) {
            // msg_container
            message.uint()
            const count = message.uint()

            for (let i = 0; i < count; i++) {
                // msg_id:long seqno:int bytes:int
                const msgId = message.long()
                const seqNo = message.uint() // seqno
                const length = message.uint()

                // container can't contain other containers, but can contain rpc_result
                const obj = message.raw(length)

                this._handleRawMessage(
                    msgId,
                    seqNo,
                    new TlBinaryReader(this._readerMap, obj),
                )
            }

            return
        }

        if (message.peekUint() === 0xf35c6d01) {
            // rpc_result
            message.uint()

            this._sendAck(messageId)

            return this._onRpcResult(message)
        }
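
        // For reference, the TL declarations behind the three constructor
        // IDs checked above (from the MTProto service schema):
        //   gzip_packed#3072cfa1 packed_data:bytes = Object;
        //   msg_container#73f1f8dc messages:vector<%Message> = MessageContainer;
        //   message msg_id:long seqno:int bytes:int body:Object = Message;
        //   rpc_result#f35c6d01 req_msg_id:long result:Object = RpcResult;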

        // we are safe.. i guess
        this._handleMessage(messageId, message.object())
    }

    private _handleMessage(messageId: Long, message_: unknown): void {
        if (messageId.isEven()) {
            this.log.warn(
                'warn: ignoring message with invalid messageId = %s (is even)',
                messageId,
            )

            return
        }

        if (this._session.recentIncomingMsgIds.has(messageId)) {
            this.log.warn('warn: ignoring duplicate message %s', messageId)

            return
        }
        const message = message_ as mtp.TlObject

        this.log.verbose('received %s (msg_id: %l)', message._, messageId)
        this._session.recentIncomingMsgIds.add(messageId)

        switch (message._) {
            case 'mt_msgs_ack':
            case 'mt_http_wait':
            case 'mt_bad_msg_notification':
            case 'mt_bad_server_salt':
            case 'mt_msgs_all_info':
            case 'mt_msgs_state_info':
            case 'mt_msg_detailed_info':
            case 'mt_msg_new_detailed_info':
                break
            default:
                this._sendAck(messageId)
        }

        switch (message._) {
            case 'mt_pong':
                this._onPong(message)
                break
            case 'mt_bad_server_salt':
                this._onBadServerSalt(message)
                break
            case 'mt_bad_msg_notification':
                this._onBadMsgNotification(messageId, message)
                break
            case 'mt_msgs_ack':
                message.msgIds.forEach((msgId) => this._onMessageAcked(msgId))
                break
            case 'mt_new_session_created':
                this._onNewSessionCreated(message)
                break
            case 'mt_msgs_all_info':
                this._onMessagesInfo(message.msgIds, message.info)
                break
            case 'mt_msg_detailed_info':
                this._onMessageInfo(
                    message.msgId,
                    message.status,
                    message.answerMsgId,
                )
                break
            case 'mt_msg_new_detailed_info':
                this._onMessageInfo(Long.ZERO, 0, message.answerMsgId)
                break
            case 'mt_msgs_state_info':
                this._onMsgsStateInfo(message)
                break
            case 'mt_future_salts':
                // todo
                break
            case 'mt_msgs_state_req':
            case 'mt_msg_resend_req':
                // tdlib doesn't handle them, so why should we? :upside_down_face:
                this.log.warn(
                    'received %s (msg_id = %l): %j',
                    message._,
                    messageId,
                    message,
                )
                break
            case 'mt_destroy_session_ok':
            case 'mt_destroy_session_none':
                this._onDestroySessionResult(message)
                break
            default:
                if (tl.isAnyUpdates(message)) {
                    if (this._usable && this.params.inactivityTimeout) {
                        this._rescheduleInactivity()
                    }

                    if (this.params.disableUpdates) {
                        this.log.warn(
                            'received updates, but updates are disabled',
                        )
                        // likely due to some request in the session missing invokeWithoutUpdates
                        // todo: reset session
                        break
                    }
                    if (!this.params.isMainConnection) {
                        this.log.warn('received updates on non-main connection')
                        break
                    }

                    this.emit('update', message)

                    return
                }

                this.log.warn('unknown message received: %j', message)
        }
    }

    private _onRpcResult(message: TlBinaryReader): void {
        if (this._usable && this.params.inactivityTimeout) {
            this._rescheduleInactivity()
        }

        const reqMsgId = message.long()

        if (reqMsgId.isZero()) {
            let resultType

            try {
                resultType = (message.object() as any)._
            } catch (err) {
                resultType = message.peekUint()
            }
            this.log.warn(
                'received rpc_result with %j and req_msg_id = 0',
                resultType,
            )

            return
        }

        const msg = this._session.pendingMessages.get(reqMsgId)

        if (!msg) {
            let result

            try {
                result = message.object() as any
            } catch (err) {
                result = '[failed to parse]'
            }

            // check if the msg is one of the recent ones
            if (this._session.recentOutgoingMsgIds.has(reqMsgId)) {
                this.log.debug(
                    'received rpc_result again for %l (contains %j)',
                    reqMsgId,
                    result,
                )
            } else {
                this.log.warn(
                    'received rpc_result for unknown message %l: %j',
                    reqMsgId,
                    result,
                )
            }

            return
        }

        // special case for auth key binding
        if (msg._ !== 'rpc') {
            if (msg._ === 'bind') {
                msg.promise.resolve(message.object())

                return
            }

            this.log.error(
                'received rpc_result for %s request %l',
                msg._,
                reqMsgId,
            )

            return
        }

        const rpc = msg.rpc

        const customReader = this._readerMap._results![rpc.method]
        const result: any = customReader ?
            customReader(message) :
            message.object()

        // initConnection call was definitely received and
        // processed by the server, so we no longer need to use it
        if (rpc.initConn) {
            this._session.initConnectionCalled = true
        }

        this.log.verbose('<<< (%s) %j', rpc.method, result)

        if (result._ === 'mt_rpc_error') {
            const res = result as mtp.RawMt_rpc_error
            this.log.debug(
                'received rpc_error [%d:%s] for %l (%s)',
                res.errorCode,
                res.errorMessage,
                reqMsgId,
                rpc.method,
            )

            if (res.errorMessage === 'AUTH_KEY_PERM_EMPTY') {
                // happens when temp auth key is not yet bound
                // this shouldn't happen as we block any outbound communications
                // until the temp key is derived and bound.
                //
                // i think it is also possible for the error to be returned
                // when the temp key has expired, but this still shouldn't happen
                // but this is tg, so something may go wrong, and we will receive this as an error
                // (for god's sake why is this not in mtproto and instead hacked into the app layer)
                this._authorizePfs()
                this._onMessageFailed(reqMsgId, 'AUTH_KEY_PERM_EMPTY', true)

                return
            }

            if (res.errorMessage === 'CONNECTION_NOT_INITED') {
                // this seems to sometimes happen when using pfs
                // no idea why, but tdlib also seems to handle these, so whatever

                this._session.initConnectionCalled = false
                this._onMessageFailed(reqMsgId, res.errorMessage, true)

                // just setting this flag is not enough because the message
                // is already serialized, so we do this awesome hack
                this.sendRpc({ _: 'help.getNearestDc' })
                    .then(() => {
                        this.log.debug(
                            'additional help.getNearestDc for initConnection ok',
                        )
                    })
                    .catch((err) => {
                        this.log.debug(
                            'additional help.getNearestDc for initConnection error: %s',
                            err,
                        )
                    })

                return
            }

            if (rpc.cancelled) return

            const error = tl.errors.createRpcErrorFromTl(res)

            if (this.params.niceStacks !== false) {
                makeNiceStack(error, rpc.stack!, rpc.method)
            }

            rpc.promise.reject(error)
        } else {
            this.log.debug(
                'received rpc_result (%s) for request %l (%s)',
                result._,
                reqMsgId,
                rpc.method,
            )

            if (rpc.cancelled) return

            rpc.promise.resolve(result)
        }

        this._onMessageAcked(reqMsgId)
        this._session.pendingMessages.delete(reqMsgId)
    }

    private _onMessageAcked(msgId: Long, inContainer = false): void {
        const msg = this._session.pendingMessages.get(msgId)

        if (!msg) {
            this.log.warn('received ack for unknown message %l', msgId)

            return
        }

        switch (msg._) {
            case 'container':
                this.log.debug(
                    'received ack for container %l (size = %d)',
                    msgId,
                    msg.msgIds.length,
                )

                msg.msgIds.forEach((msgId) => this._onMessageAcked(msgId, true))

                // we no longer need info about the container
                this._session.pendingMessages.delete(msgId)
                break

            case 'rpc': {
                const rpc = msg.rpc
                this.log.debug(
                    'received ack for rpc query %l (%s, acked before = %s)',
                    msgId,
                    rpc.method,
                    rpc.acked,
                )

                rpc.acked = true

                if (
                    !inContainer &&
                    rpc.containerId &&
                    this._session.pendingMessages.has(rpc.containerId)
                ) {
                    // ack all the messages in that container
                    this._onMessageAcked(rpc.containerId)
                }

                // this message could also already be in some queue,
                removeFromLongArray(this._session.queuedStateReq, msgId)
                removeFromLongArray(this._session.queuedResendReq, msgId)
                // if resend/state was already requested, it will simply be ignored

                this._session.getStateSchedule.remove(rpc)
                break
            }
            case 'bind':
                break // do nothing, wait for the result

            default:
                if (!inContainer) {
                    this.log.warn(
                        'received unexpected ack for %s query %l',
                        msg._,
                        msgId,
                    )
                }
        }
    }

    private _onAllFailed(reason: string) {
        // called when all the pending messages are to be resent
        // e.g. when server returns 429

        // most service messages can be omitted as stale
        this._resetLastPing(true)

        for (const msgId of this._session.pendingMessages.keys()) {
            const info = this._session.pendingMessages.get(msgId)!

            switch (info._) {
                case 'container':
                case 'state':
                case 'resend':
                case 'ping':
                    // no longer relevant
                    this._session.pendingMessages.delete(msgId)
                    break
                default:
                    this._onMessageFailed(msgId, reason, true)
                    break
            }
        }
    }

    private _onMessageFailed(
        msgId: Long,
        reason: string,
        inContainer = false,
    ): void {
        const msgInfo = this._session.pendingMessages.get(msgId)

        if (!msgInfo) {
            this.log.debug(
                'unknown message %l failed because of %s',
                msgId,
                reason,
            )

            return
        }

        switch (msgInfo._) {
            case 'container':
                this.log.debug(
                    'container %l (size = %d) failed because of %s',
                    msgId,
                    msgInfo.msgIds.length,
                    reason,
                )
                msgInfo.msgIds.forEach((msgId) =>
                    this._onMessageFailed(msgId, reason, true),
                )
                break
            case 'ping':
                this.log.debug(
                    'ping (msg_id = %l) failed because of %s',
                    msgId,
                    reason,
                )
                // restart ping
                this._resetLastPing(true)
                break

            case 'rpc': {
                const rpc = msgInfo.rpc
                this.log.debug(
                    'rpc query %l (%s) failed because of %s',
                    msgId,
                    rpc.method,
                    reason,
                )

                // since the query was rejected, we can let it reassign msg_id to avoid containers
                this._session.pendingMessages.delete(msgId)
                rpc.msgId = undefined
                this._enqueueRpc(rpc, true)

                if (
                    !inContainer &&
                    rpc.containerId &&
                    this._session.pendingMessages.has(rpc.containerId)
                ) {
                    // fail all the messages in that container
                    this._onMessageFailed(rpc.containerId, reason)
                }

                // this message could also already be in some queue,
                removeFromLongArray(this._session.queuedStateReq, msgId)
                removeFromLongArray(this._session.queuedResendReq, msgId)
                // if resend/state was already requested, it will simply be ignored

                this._session.getStateSchedule.remove(rpc)

                break
            }
            case 'resend':
                this.log.debug(
                    'resend request %l (size = %d) failed because of %s',
                    msgId,
                    msgInfo.msgIds.length,
                    reason,
                )
                this._session.queuedResendReq.splice(0, 0, ...msgInfo.msgIds)
                this._flushTimer.emitWhenIdle()
                break
            case 'state':
                this.log.debug(
                    'state request %l (size = %d) failed because of %s',
                    msgId,
                    msgInfo.msgIds.length,
                    reason,
                )
                this._session.queuedStateReq.splice(0, 0, ...msgInfo.msgIds)
                this._flushTimer.emitWhenIdle()
                break
            case 'bind':
                this.log.debug(
                    'temp key binding request %l failed because of %s, retrying',
                    msgId,
                    reason,
                )
                msgInfo.promise.reject(Error(reason))
        }

        this._session.pendingMessages.delete(msgId)
    }

    private _resetLastPing(withTime = false): void {
        if (withTime) this._lastPingTime = 0

        if (!this._lastPingMsgId.isZero()) {
            this._session.pendingMessages.delete(this._lastPingMsgId)
        }

        this._lastPingMsgId = Long.ZERO
    }

    private _registerOutgoingMsgId(msgId: Long): Long {
        this._session.recentOutgoingMsgIds.add(msgId)

        return msgId
    }

    private _onPong({ msgId, pingId }: mtp.RawMt_pong): void {
        const info = this._session.pendingMessages.get(msgId)

        if (!info) {
            this.log.warn(
                'received pong to unknown ping (msg_id %l, ping_id %l)',
                msgId,
                pingId,
            )

            return
        }

        if (info._ !== 'ping') {
            this.log.warn(
                'received pong to %s query, not ping (msg_id %l, ping_id %l)',
                info._,
                msgId,
                pingId,
            )

            return
        }

        if (info.pingId.neq(pingId)) {
            this.log.warn(
                'received pong to %l, but expected ping_id = %l (got %l)',
                msgId,
                info.pingId,
                pingId,
            )
        }

        const rtt = Date.now() - this._lastPingTime
        this._lastPingRtt = rtt

        if (info.containerId.neq(msgId)) {
            this._onMessageAcked(info.containerId)
        }

        this.log.debug(
            'received pong: msg_id %l, ping_id %l, rtt = %dms',
            msgId,
            pingId,
            rtt,
        )
        this._resetLastPing()
    }

    private _onBadServerSalt(msg: mtp.RawMt_bad_server_salt): void {
        this._session.serverSalt = msg.newServerSalt

        this._onMessageFailed(msg.badMsgId, 'bad_server_salt')
    }

    private _onBadMsgNotification(
        msgId: Long,
        msg: mtp.RawMt_bad_msg_notification,
    ): void {
        switch (msg.errorCode) {
            case 16:
            case 17:
            case 20: {
                if (msg.errorCode !== 20) {
                    // msg_id is either too high or too low
                    // code 20 means msg_id is too old,
                    // we just need to resend the message
                    const serverTime = msgId.low >>> 0
                    const timeOffset =
                        Math.floor(Date.now() / 1000) - serverTime

                    this._session._timeOffset = timeOffset
                    this.log.debug(
                        'server time: %d, corrected offset to %d',
                        serverTime,
                        timeOffset,
                    )
                }

                this._onMessageFailed(
                    msg.badMsgId,
                    `bad_msg_notification ${msg.errorCode}`,
                )
                break
            }
            default:
                // something went very wrong, we need to reset the session
                this.log.error(
                    'received bad_msg_notification for msg_id = %l, code = %d. session will be reset',
                    msg.badMsgId,
                    msg.errorCode,
                )
                this._resetSession()
                break
        }
    }
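
    // Worked example of the clock correction above (hypothetical values):
    // if the server's msg_id encodes server time 1700000100 while
    // Math.floor(Date.now() / 1000) is 1700000000, timeOffset becomes
    // 1700000000 - 1700000100 = -100; MtprotoSession then accounts for
    // this offset when generating new msg_ids so they fall within the
    // server's accepted window.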

    private _onNewSessionCreated({
        firstMsgId,
        serverSalt,
        uniqueId,
    }: mtp.RawMt_new_session_created): void {
        if (uniqueId.eq(this._lastSessionCreatedUid)) {
            this.log.debug(
                'received new_session_created with the same uid = %l, ignoring',
                uniqueId,
            )

            return
        }

        if (
            !this._lastSessionCreatedUid.isZero() &&
            !this.params.disableUpdates
        ) {
            // force the client to fetch missed updates
            // when _lastSessionCreatedUid == 0, the connection has
            // just been established, and the client will fetch them anyways
            this.emit('update', { _: 'updatesTooLong' })
        }

        this._session.serverSalt = serverSalt

        this.log.debug(
            'received new_session_created, uid = %l, first msg_id = %l',
            uniqueId,
            firstMsgId,
        )

        for (const msgId of this._session.pendingMessages.keys()) {
            const val = this._session.pendingMessages.get(msgId)!

            if (val._ === 'bind') {
                // should NOT happen.
                if (msgId.lt(firstMsgId)) {
                    this._onMessageFailed(msgId, 'received in wrong session')
                }
                continue
            }

            if (val._ === 'container') {
                if (msgId.lt(firstMsgId)) {
                    // all messages in this container will be resent
                    // info about this container is no longer needed
                    this._session.pendingMessages.delete(msgId)
                }

                continue
            }

            const containerId =
                val._ === 'rpc' ? val.rpc.containerId || msgId : val.containerId

            if (containerId.lt(firstMsgId)) {
                this._onMessageFailed(msgId, 'new_session_created', true)
            }
        }
    }

    private _onMessageInfo(
        msgId: Long,
        status: number,
        answerMsgId: Long,
    ): void {
        if (!msgId.isZero()) {
            const info = this._session.pendingMessages.get(msgId)

            if (!info) {
                this.log.info(
                    'received message info about unknown message %l',
                    msgId,
                )

                return
            }
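
            // Per the MTProto service-message docs, the lower 3 bits of the
            // status byte mean (approximately): 1 — nothing is known about
            // the message, 2/3 — the message was not received (msg_id too
            // low/high), 4 — the message was received.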

            switch (status & 7) {
                case 1:
                case 2:
                case 3:
                    // message wasn't received by the server
                    this._onMessageFailed(msgId, `message info state ${status}`)
                    break

                case 0:
                    if (!answerMsgId.isZero()) {
                        this.log.warn(
                            'received message info with status = 0: msg_id = %l, status = %d, ans_id = %l',
                            msgId,
                            status,
                            answerMsgId,
                        )

                        return this._onMessageFailed(
                            msgId,
                            'message info state = 0, ans_id != 0',
                        )
                    }
                // fallthrough
                case 4:
                    this._onMessageAcked(msgId)
                    break
            }
        }

        if (
            !answerMsgId.isZero() &&
            !this._session.recentIncomingMsgIds.has(answerMsgId)
        ) {
            this.log.debug(
                'received message info for %l, but answer (%l) was not received yet',
                msgId,
                answerMsgId,
            )
            this._session.queuedResendReq.push(answerMsgId)
            this._flushTimer.emitWhenIdle()

            return
        }

        this.log.debug(
            'received message info for %l, and answer (%l) was already received',
            msgId,
            answerMsgId,
        )
    }

    private _onMessagesInfo(msgIds: Long[], info: Buffer): void {
        if (msgIds.length !== info.length) {
            this.log.warn(
                'messages state info was invalid: msg_ids.length !== info.length',
            )
        }

        for (let i = 0; i < msgIds.length; i++) {
            this._onMessageInfo(msgIds[i], info[i], Long.ZERO)
        }
    }

    private _onMsgsStateInfo(msg: mtp.RawMt_msgs_state_info): void {
        const info = this._session.pendingMessages.get(msg.reqMsgId)

        if (!info) {
            this.log.warn(
                'received msgs_state_info to unknown request %l',
                msg.reqMsgId,
            )

            return
        }

        if (info._ !== 'state') {
            this.log.warn(
                'received msgs_state_info to %s query %l',
                info._,
                msg.reqMsgId,
            )

            return
        }

        this._onMessagesInfo(info.msgIds, msg.info)
    }

    private _onDestroySessionResult(msg: mtp.TypeDestroySessionRes): void {
        const reqMsgId = this._session.destroySessionIdToMsgId.get(
            msg.sessionId,
        )

        if (!reqMsgId) {
            this.log.warn(
                'received %s for unknown session %h',
                msg._,
                msg.sessionId,
            )

            return
        }

        this._session.destroySessionIdToMsgId.delete(msg.sessionId)
        this._session.pendingMessages.delete(reqMsgId)
        this.log.debug('received %s for session %h', msg._, msg.sessionId)
    }

    private _enqueueRpc(rpc: PendingRpc, force?: boolean) {
        if (this._session.enqueueRpc(rpc, force)) {
            this._flushTimer.emitWhenIdle()
        }
    }

    _resetSession(): void {
        this._queuedDestroySession.push(this._session._sessionId)

        this._session.resetState(true)
        this.reconnect()

        // once we receive new_session_created, all pending messages will be resent.
        this._flushTimer.reset()
    }

    private _sendAck(msgId: Long): void {
        if (this._session.queuedAcks.length === 0) {
            this._flushTimer.emitBeforeNext(30000)
        }

        this._session.queuedAcks.push(msgId)

        if (this._session.queuedAcks.length >= 100) {
            this._flushTimer.emitNow()
        }
    }

    sendRpc<T extends tl.RpcMethod>(
        request: T,
        stack?: string,
        timeout?: number,
    ): Promise<tl.RpcCallReturn[T['_']]> {
        if (this._usable && this.params.inactivityTimeout) {
            this._rescheduleInactivity()
        }

        if (!stack && this.params.niceStacks !== false) {
            stack = new Error().stack
        }

        const method = request._

        let obj: tl.TlObject = request
        let initConn = false

        if (this.params.disableUpdates) {
            obj = {
                _: 'invokeWithoutUpdates',
                query: obj,
            }
        }

        if (!this._session.initConnectionCalled) {
            // we will wrap every rpc call with initConnection
            // until some of the requests wrapped with it is
            // either acked or returns rpc_result

            this.log.debug(
                'wrapping %s with initConnection, layer: %d',
                method,
                this.params.layer,
            )
            obj = {
                _: 'invokeWithLayer',
                layer: this.params.layer,
                query: {
                    ...this.params.initConnection,
                    query: obj,
                },
            }
            initConn = true
        }

        this.log.verbose('>>> %j', obj)

        let content = TlBinaryWriter.serializeObject(this._writerMap, obj)

        if (content.length > 1044404) {
            // if you send larger payloads, telegram will just close connection,
            // and since we resend them, it will get resent after reconnection and
            // that will be an endless loop of reconnections. we don't want that,
            // and payloads this large are usually a sign of an error in the code.
            throw new Error(`Payload is too big (${content.length} > 1044404)`)
        }

        // gzip
        let shouldGzip = content.length > 128

        if (content.length > 16384) {
            // test compression ratio for the middle part
            // if it is less than 0.9, then try to compress the whole request

            const middle = ~~((content.length - 1024) / 2)
            const gzipped = gzipDeflate(
                content.slice(middle, middle + 1024),
                0.9,
            )

            if (!gzipped) shouldGzip = false
        }

        if (shouldGzip) {
            const gzipped = gzipDeflate(content, 0.9)

            if (gzipped) {
                this.log.debug(
                    'gzipped %s (%db -> %db)',
                    method,
                    content.length,
                    gzipped.length,
                )

                content = gzipped
            } else {
                shouldGzip = false
            }
        }
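
        // Worked example of the sampling heuristic above (hypothetical
        // sizes): for a 20000-byte payload, middle = ~~((20000 - 1024) / 2)
        // = 9488, so bytes 9488..10512 are test-compressed first; gzipDeflate
        // returns a falsy result when the 0.9 ratio isn't reached, in which
        // case whole-payload compression is skipped entirely.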
|
|
|
|
|
|
|
|
        const pending: PendingRpc = {
            method,
            promise: undefined as any, // because we need the object to make a promise
            data: content,
            stack,
            // we will need to know size of gzip_packed overhead in _flush()
            gzipOverhead: shouldGzip ?
                4 + TlSerializationCounter.countBytesOverhead(content.length) :
                0,
            initConn,

            // setting them as well so jit can optimize stuff
            sent: undefined,
            getState: undefined,
            msgId: undefined,
            seqNo: undefined,
            containerId: undefined,
            acked: undefined,
            cancelled: undefined,
            timeout: undefined,
        }

        const promise = createCancellablePromise<any>(
            this._cancelRpc.bind(this, pending),
        )
        pending.promise = promise

        if (timeout) {
            // note: _cancelRpc must be bound, otherwise `this` would be
            // lost by the time the timer fires
            pending.timeout = setTimeout(
                this._cancelRpc.bind(this),
                timeout,
                pending,
                true,
            )
        }

        this._enqueueRpc(pending, true)

        return promise
    }

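    /**
     * Cancel a pending RPC: clears its timeout (unless the timeout itself
     * fired), and, if `onTimeout` is set, rejects the promise with an
     * RpcTimeoutError. If the message was already sent, a rpc_drop_answer
     * is queued; otherwise it is simply removed from the send queue.
     */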
    private _cancelRpc(rpc: PendingRpc, onTimeout = false): void {
        if (rpc.cancelled && !onTimeout) {
            throw new Error('RPC was already cancelled')
        }

        if (!onTimeout && rpc.timeout) {
            clearTimeout(rpc.timeout)
        }

        if (onTimeout) {
            const error = new tl.errors.RpcTimeoutError()

            if (this.params.niceStacks !== false) {
                makeNiceStack(error, rpc.stack!, rpc.method)
            }

            rpc.promise.reject(error)
        }

        rpc.cancelled = true

        if (rpc.msgId) {
            this._session.queuedCancelReq.push(rpc.msgId)
            this._flushTimer.emitWhenIdle()
        } else {
            // in case rpc wasn't sent yet (or had some error),
            // we can simply remove it from the queue
            this._session.queuedRpc.remove(rpc)
        }
    }

    private get _hasPendingServiceMessages(): boolean {
        return Boolean(
            this._session.queuedRpc.length ||
                this._session.queuedAcks.length ||
                this._session.queuedStateReq.length ||
                this._session.queuedResendReq.length,
        )
    }

    protected _onInactivityTimeout() {
        // we should send all pending acks and other service messages
        // before dropping the connection

        if (!this._hasPendingServiceMessages) {
            this.log.debug('no pending service messages, closing connection')
            super._onInactivityTimeout()

            return
        }

        this._flush(() => {
            if (this._hasPendingServiceMessages) {
                // the callback will be called again once all pending messages are sent
                return
            }

            this.log.debug('pending service messages sent, closing connection')
            this._flushTimer.reset()
            super._onInactivityTimeout()
        })
    }

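    /**
     * Flush the send queue: serialize everything that is currently queued
     * into (at most) one MTProto packet and send it.
     *
     * Does nothing while the connection is not usable — auth key not yet
     * ready, PFS key binding still pending, or a 429 backoff in effect —
     * the queue will be flushed again once it becomes usable.
     */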
    private _flush(callback?: () => void): void {
        if (
            !this._session._authKey.ready ||
            this._isPfsBindingPending ||
            this._current429Timeout
        ) {
            this.log.debug(
                'skipping flush, connection is not usable (auth key ready = %b, pfs binding pending = %b, 429 timeout = %b)',
                this._session._authKey.ready,
                this._isPfsBindingPending,
                Boolean(this._current429Timeout),
            )

            // it will be flushed once connection is usable
            return
        }

        try {
            this._doFlush(callback)
        } catch (e: any) {
            this.log.error('flush error: %s', e.stack)
            // should not happen unless there's a bug in the code
        }

        // schedule next flush
        // if there are more queued requests, flush immediately
        // (they likely didn't fit into one message)
        if (this._hasPendingServiceMessages) {
            this._flush(callback)
        } else {
            this._flushTimer.emitBefore(this._lastPingTime + 60000)
        }
    }

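    /**
     * Build and send a single outgoing packet. Rough shape of the result
     * (a container is only used when more than one message is pending,
     * or when a message is re-sent and needs a fresh msg_id):
     *
     *   msg_container
     *     ├─ msgs_ack          (queued acks, at most 8192 per message)
     *     ├─ ping              (roughly once a minute)
     *     ├─ msgs_state_req    (state queries for un-acked messages)
     *     ├─ msg_resend_req / rpc_drop_answer / destroy_session
     *     └─ queued rpc calls  (each possibly wrapped in gzip_packed)
     */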
    private _doFlush(callback?: () => void): void {
        this.log.debug(
            'flushing send queue. queued rpc: %d',
            this._session.queuedRpc.length,
        )

        // oh bloody hell mate

        // total size & count
        let packetSize = 0
        let messageCount = 0
        // size & msg count that count towards container limit
        // these will be added to total later
        let containerMessageCount = 0
        let containerSize = 0

        let ackRequest: Buffer | null = null
        let ackMsgIds: Long[] | null = null

        let pingRequest: Buffer | null = null
        let pingId: Long | null = null
        let pingMsgId: Long | null = null

        let getStateRequest: Buffer | null = null
        let getStateMsgId: Long | null = null
        let getStateMsgIds: Long[] | null = null

        let resendRequest: Buffer | null = null
        let resendMsgId: Long | null = null
        let resendMsgIds: Long[] | null = null

        let cancelRpcs: Long[] | null = null
        let destroySessions: Long[] | null = null

        const now = Date.now()

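        // each message written below costs its body length + 16 bytes of
        // per-message header (msg_id: 8 bytes, seqno: 4, body length: 4)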
        if (this._session.queuedAcks.length) {
            let acks = this._session.queuedAcks

            if (acks.length > 8192) {
                this._session.queuedAcks = acks.slice(8192)
                acks = acks.slice(0, 8192)
            } else {
                this._session.queuedAcks = []
            }

            const obj: mtp.RawMt_msgs_ack = {
                _: 'mt_msgs_ack',
                msgIds: acks,
            }
            ackMsgIds = obj.msgIds

            ackRequest = TlBinaryWriter.serializeObject(this._writerMap, obj)
            packetSize += ackRequest.length + 16
            messageCount += 1
        }

        const getStateTime = now + 1500

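        // send a ping roughly once a minute. if the previous ping was never
        // answered with a pong, drop it from the pending map first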
        if (now - this._lastPingTime > 60000) {
            if (!this._lastPingMsgId.isZero()) {
                this.log.warn(
                    "didn't receive pong for previous ping (msg_id = %l)",
                    this._lastPingMsgId,
                )
                this._session.pendingMessages.delete(this._lastPingMsgId)
            }

            pingId = randomLong()
            const obj: mtp.RawMt_ping = {
                _: 'mt_ping',
                pingId,
            }

            this._lastPingTime = Date.now()

            pingRequest = TlBinaryWriter.serializeObject(this._writerMap, obj)
            containerSize += pingRequest.length + 16
            containerMessageCount += 1
        }

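        // msgs_state_req: ask the server about the status of messages we
        // haven't seen a response or ack for — both the explicitly queued
        // ones and those whose getState deadline has passed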
        {
            if (this._session.queuedStateReq.length) {
                let ids = this._session.queuedStateReq

                if (ids.length > 8192) {
                    this._session.queuedStateReq = ids.slice(8192)
                    ids = ids.slice(0, 8192)
                } else {
                    this._session.queuedStateReq = []
                }
                getStateMsgIds = ids
            }

            const idx = this._session.getStateSchedule.index(
                { getState: now } as any,
                true,
            )

            if (idx > 0) {
                const toGetState = this._session.getStateSchedule.raw.splice(
                    0,
                    idx,
                )
                if (!getStateMsgIds) getStateMsgIds = []
                toGetState.forEach((it) => getStateMsgIds!.push(it.msgId!))
            }

            if (getStateMsgIds) {
                const obj: mtp.RawMt_msgs_state_req = {
                    _: 'mt_msgs_state_req',
                    msgIds: getStateMsgIds,
                }

                getStateRequest = TlBinaryWriter.serializeObject(
                    this._writerMap,
                    obj,
                )
                packetSize += getStateRequest.length + 16
                messageCount += 1
            }
        }

        if (this._session.queuedResendReq.length) {
            resendMsgIds = this._session.queuedResendReq

            if (resendMsgIds.length > 8192) {
                this._session.queuedResendReq = resendMsgIds.slice(8192)
                resendMsgIds = resendMsgIds.slice(0, 8192)
            } else {
                this._session.queuedResendReq = []
            }

            const obj: mtp.RawMt_msg_resend_req = {
                _: 'mt_msg_resend_req',
                msgIds: resendMsgIds,
            }

            resendRequest = TlBinaryWriter.serializeObject(this._writerMap, obj)
            packetSize += resendRequest.length + 16
            messageCount += 1
        }

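        // rpc_drop_answer and destroy_session both have 12-byte bodies
        // (4-byte constructor id + 8-byte long), i.e. 28 bytes each
        // including the per-message header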
        if (this._session.queuedCancelReq.length) {
            containerMessageCount += this._session.queuedCancelReq.length
            containerSize += this._session.queuedCancelReq.length * 28
            cancelRpcs = this._session.queuedCancelReq
            this._session.queuedCancelReq = []
        }

        if (this._queuedDestroySession.length) {
            containerMessageCount += this._queuedDestroySession.length
            containerSize += this._queuedDestroySession.length * 28
            destroySessions = this._queuedDestroySession
            this._queuedDestroySession = []
        }

        let forceContainer = false
        const rpcToSend: PendingRpc[] = []

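        // pack queued rpcs while they still fit. anything that doesn't fit
        // stays in the queue and will be picked up by the immediate re-flush
        // scheduled in _flush()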
        while (
            this._session.queuedRpc.length &&
            containerSize < 32768 && // 2^15
            containerMessageCount < 1020
        ) {
            const msg = this._session.queuedRpc.popFront()!
            if (msg.cancelled) continue

            // note: we don't check for <2^15 here.
            // this is not documented, but large requests
            // (like upload.saveFilePart) *may* exceed that limit

            rpcToSend.push(msg)
            containerSize += msg.data.length + 16
            if (msg.gzipOverhead) containerSize += msg.gzipOverhead

            // if the message was already assigned a msg_id,
            // we must wrap it in a container with a newer msg_id
            if (msg.msgId) forceContainer = true
        }

        packetSize += containerSize
        messageCount += containerMessageCount + rpcToSend.length

        if (!messageCount) {
            this.log.debug('flush failed: nothing to flush')

            return
        }

        const useContainer = forceContainer || messageCount > 1
        if (useContainer) packetSize += 24 // 8 (msg_container) + 16 (mtproto header)

        const writer = TlBinaryWriter.alloc(this._writerMap, packetSize)

        if (useContainer) {
            // leave bytes for mtproto header (we'll write it later,
            // since we need seqno and msg_id to be larger than the content)
            writer.pos += 16
            writer.uint(0x73f1f8dc) // msg_container
            writer.uint(messageCount)
        }

        const otherPendings: Exclude<
            PendingMessage,
            { _: 'rpc' | 'container' | 'bind' }
        >[] = []

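        // note: acks are deliberately not tracked in pendingMessages — there
        // is no response to wait for, and on send failure they are simply
        // re-queued (see the catch handler at the bottom)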
        if (ackRequest) {
            this._registerOutgoingMsgId(
                this._session.writeMessage(writer, ackRequest),
            )
        }

        if (pingRequest) {
            pingMsgId = this._registerOutgoingMsgId(
                this._session.writeMessage(writer, pingRequest),
            )
            this._lastPingMsgId = pingMsgId
            const pingPending: PendingMessage = {
                _: 'ping',
                pingId: pingId!,
                containerId: pingMsgId,
            }
            this._session.pendingMessages.set(pingMsgId, pingPending)
            otherPendings.push(pingPending)
        }

        if (getStateRequest) {
            getStateMsgId = this._registerOutgoingMsgId(
                this._session.writeMessage(writer, getStateRequest),
            )
            const getStatePending: PendingMessage = {
                _: 'state',
                msgIds: getStateMsgIds!,
                containerId: getStateMsgId,
            }
            this._session.pendingMessages.set(getStateMsgId, getStatePending)
            otherPendings.push(getStatePending)
        }

        if (resendRequest) {
            resendMsgId = this._registerOutgoingMsgId(
                this._session.writeMessage(writer, resendRequest),
            )
            const resendPending: PendingMessage = {
                _: 'resend',
                msgIds: resendMsgIds!,
                containerId: resendMsgId,
            }
            this._session.pendingMessages.set(resendMsgId, resendPending)
            otherPendings.push(resendPending)
        }

        if (cancelRpcs) {
            cancelRpcs.forEach((msgId) => {
                const cancelMsgId = this._registerOutgoingMsgId(
                    this._session.writeMessage(writer, {
                        _: 'mt_rpc_drop_answer',
                        reqMsgId: msgId,
                    }),
                )

                const pending: PendingMessage = {
                    _: 'cancel',
                    msgId,
                    containerId: cancelMsgId,
                }
                this._session.pendingMessages.set(cancelMsgId, pending)
                otherPendings.push(pending)
            })
        }

        if (destroySessions) {
            destroySessions.forEach((sessionId) => {
                const msgId = this._registerOutgoingMsgId(
                    this._session.writeMessage(writer, {
                        _: 'mt_destroy_session',
                        sessionId,
                    }),
                )

                const pending: PendingMessage = {
                    _: 'destroy_session',
                    sessionId,
                    containerId: msgId,
                }
                this._session.pendingMessages.set(msgId, pending)
                this._session.destroySessionIdToMsgId.set(sessionId, msgId)
                otherPendings.push(pending)
            })
        }

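        // finally, write the queued rpcs themselves. msg_ids are assigned
        // lazily, right before writing, so that they stay monotonically
        // increasing relative to everything written above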
        for (let i = 0; i < rpcToSend.length; i++) {
            const msg = rpcToSend[i]
            // not using writeMessage here because we also need seqNo, and
            // I don't want to return seqNo from there as well, because
            // that would mean extra object overhead

            if (!msg.msgId) {
                const msgId = this._session.getMessageId()
                const seqNo = this._session.getSeqNo()

                this.log.debug(
                    '%s: msg_id assigned %l, seqno: %d',
                    msg.method,
                    msgId,
                    seqNo,
                )

                msg.msgId = msgId
                msg.seqNo = seqNo
                this._session.pendingMessages.set(msgId, {
                    _: 'rpc',
                    rpc: msg,
                })
            } else {
                this.log.debug(
                    '%s: msg_id already assigned, reusing %l, seqno: %d',
                    msg.method,
                    msg.msgId,
                    msg.seqNo,
                )
            }

            // (re-)schedule get_state if needed
            if (msg.getState) {
                this._session.getStateSchedule.remove(msg)
            }
            if (!msg.acked) {
                msg.getState = getStateTime
                this._session.getStateSchedule.insert(msg)
            }

            writer.long(this._registerOutgoingMsgId(msg.msgId))
            writer.uint(msg.seqNo!)

            if (msg.gzipOverhead) {
                writer.uint(msg.data.length + msg.gzipOverhead)
                writer.uint(0x3072cfa1) // gzip_packed#3072cfa1
                writer.bytes(msg.data)
            } else {
                writer.uint(msg.data.length)
                writer.raw(msg.data)
            }

            msg.sent = true
        }

        if (useContainer) {
            // we now need to assign the container msg_id and seqno.
            // we couldn't have assigned them earlier because mtproto
            // requires them to be >= those of the contained messages

            // writer.pos is expected to be packetSize

            const containerId = this._session.getMessageId()
            writer.pos = 0
            writer.long(this._registerOutgoingMsgId(containerId))
            // containers are not content-related, hence getSeqNo(false),
            // which does not increment the content-related counter
            writer.uint(this._session.getSeqNo(false))
            writer.uint(packetSize - 16)
            writer.pos = packetSize

            const msgIds = []

            for (let i = 0; i < rpcToSend.length; i++) {
                const msg = rpcToSend[i]
                msg.containerId = containerId
                msgIds.push(msg.msgId!)
            }

            if (otherPendings.length) {
                otherPendings.forEach((msg) => {
                    // for service messages, containerId initially holds
                    // their own msg_id (see above)
                    msgIds.push(msg.containerId)
                    msg.containerId = containerId
                })
            }

            this._session.pendingMessages.set(containerId, {
                _: 'container',
                msgIds,
            })
        }

        const result = writer.result()
        // the first 8 bytes of the packet are the root msg_id (either the
        // container's or the only message's) — reading it back is probably
        // the easiest way lol
        const rootMsgId = new Long(result.readInt32LE(), result.readInt32LE(4))

        this.log.debug(
            'sending %d messages: size = %db, acks = %d (msg_id = %s), ping = %s (msg_id = %s), state_req = %s (msg_id = %s), resend = %s (msg_id = %s), rpc = %s, container = %s, root msg_id = %l',
            messageCount,
            packetSize,
            ackMsgIds?.length || 'false',
            ackMsgIds?.map((it) => it.toString()),
            Boolean(pingRequest),
            pingMsgId,
            getStateMsgIds?.map((it) => it.toString()) || 'false',
            getStateMsgId,
            resendMsgIds?.map((it) => it.toString()) || 'false',
            resendMsgId,
            rpcToSend.map((it) => it.method),
            useContainer,
            rootMsgId,
        )

        this._session
            .encryptMessage(result)
            .then((enc) => this.send(enc))
            .then(callback)
            .catch((err) => {
                this.log.error(
                    'error while sending pending messages (root msg_id = %l): %s',
                    rootMsgId,
                    err.stack,
                )

                // put the acks to the front of the queue, so they are
                // the first to be sent
                if (ackMsgIds) {
                    this._session.queuedAcks.splice(0, 0, ...ackMsgIds)
                }
                this._onMessageFailed(rootMsgId, 'unknown error')
            })
    }
}