
from __future__ import annotations

import functools
from concurrent.futures import ThreadPoolExecutor
from typing import TYPE_CHECKING, AbstractSet, Collection, Literal, NoReturn, Sequence

import regex

from tiktoken import _tiktoken

if TYPE_CHECKING:
    import numpy as np
    import numpy.typing as npt


class Encoding:
    def __init__(
        self,
        name: str,
        *,
        pat_str: str,
        mergeable_ranks: dict[bytes, int],
        special_tokens: dict[str, int],
        explicit_n_vocab: int | None = None,
    ):
        y)aw  Creates an Encoding object.

        See openai_public.py for examples of how to construct an Encoding object.

        Args:
            name: The name of the encoding. It should be clear from the name of the encoding
                what behaviour to expect, in particular, encodings with different special tokens
                should have different names.
            pat_str: A regex pattern string that is used to split the input text.
            mergeable_ranks: A dictionary mapping mergeable token bytes to their ranks. The ranks
                must correspond to merge priority.
            special_tokens: A dictionary mapping special token strings to their token values.
            explicit_n_vocab: The number of tokens in the vocabulary. If provided, it is checked
                that the number of mergeable tokens and special tokens is equal to this number.
        r   )default   N)name_pat_str_mergeable_ranks_special_tokensmaxvaluesmax_token_valuelenr   CoreBPE	_core_bpe)selfr   pat_strmergeable_ranksspecial_tokensr   s         c/var/www/python.lazyprojects.co.uk/rdoDiscordBot/venv/lib/python3.12/site-packages/tiktoken/core.py__init__zEncoding.__init__   s    0 	 /-"&&()3~/D/D/FPQ+R 
 '#n*==AQQQQ''+;a+????"**?NGT    c                "    d| j                   dS )Nz
<Encoding >)r   r   s    r    __repr__zEncoding.__repr__8   s    DII=**r"   c                    	 | j                   j                  |      S # t        $ r@ |j                  dd      j	                  dd      }| j                   j                  |      cY S w xY w)zEncodes a string into tokens, ignoring special tokens.

        This is equivalent to `encode(text, disallowed_special=())` (but slightly faster).

        ```
        >>> enc.encode_ordinary("hello world")
        [31373, 995]
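        >>> # illustrative, same encoding as the examples above: special-token text
        >>> # is treated as plain text here (cf. encode with disallowed_special=())
        >>> enc.encode_ordinary("<|endoftext|>")
        [27, 91, 437, 1659, 5239, 91, 29]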
        utf-16surrogatepassreplace)r   encode_ordinaryUnicodeEncodeErrorencodedecoder   texts     r    r+   zEncoding.encode_ordinary?   s]    	8>>11$77! 	8;;x9@@9UD>>11$77	8s    AA&%A&allallowed_specialdisallowed_specialc                  |dk(  r| j                   }|dk(  r| j                   |z
  }|rPt        |t              st        |      }t        |      j	                  |      x}rt        |j                                	 | j                  j                  ||      S # t        $ rA |j                  dd      j                  dd      }| j                  j                  ||      cY S w xY w)aH  Encodes a string into tokens.

        Special tokens are artificial tokens used to unlock capabilities from a model,
        such as fill-in-the-middle. So we want to be careful about accidentally encoding special
        tokens, since they can be used to trick a model into doing something we don't want it to do.

        Hence, by default, encode will raise an error if it encounters text that corresponds
        to a special token. This can be controlled on a per-token level using the `allowed_special`
        and `disallowed_special` parameters. In particular:
        - Setting `disallowed_special` to () will prevent this function from raising errors and
          cause all text corresponding to special tokens to be encoded as natural text.
        - Setting `allowed_special` to "all" will cause this function to treat all text
          corresponding to special tokens to be encoded as special tokens.

        ```
        >>> enc.encode("hello world")
        [31373, 995]
        >>> enc.encode("<|endoftext|>", allowed_special={"<|endoftext|>"})
        [50256]
        >>> enc.encode("<|endoftext|>", allowed_special="all")
        [50256]
        >>> enc.encode("<|endoftext|>")
        # Raises ValueError
        >>> enc.encode("<|endoftext|>", disallowed_special=())
        [27, 91, 437, 1659, 5239, 91, 29]
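        >>> # illustrative: both controls combined, allowing one special token while
        >>> # silencing the check for all the others
        >>> enc.encode("<|endoftext|>", allowed_special={"<|endoftext|>"}, disallowed_special=())
        [50256]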
        ```
        """
        if allowed_special == "all":
            allowed_special = self.special_tokens_set
        if disallowed_special == "all":
            disallowed_special = self.special_tokens_set - allowed_special
        if disallowed_special:
            if not isinstance(disallowed_special, frozenset):
                disallowed_special = frozenset(disallowed_special)
            if match := _special_token_regex(disallowed_special).search(text):
                raise_disallowed_special_token(match.group())

        try:
            return self._core_bpe.encode(text, allowed_special)
        except UnicodeEncodeError:
            # BPE operates on bytes, but the regex operates on unicode. If we pass a str that is
            # invalid UTF-8 to Rust, it will rightfully complain. Here we do a quick and dirty
            # fixup for any surrogate pairs that may have been created from concatenating two
            # utf-16 strings.
            text = text.encode("utf-16", "surrogatepass").decode("utf-16", "replace")
            return self._core_bpe.encode(text, allowed_special)

    def encode_to_numpy(
        self,
        text: str,
        *,
        allowed_special: Literal["all"] | AbstractSet[str] = set(),
        disallowed_special: Literal["all"] | Collection[str] = "all",
    ) -> npt.NDArray[np.uint32]:
        """Encodes a string into tokens, returning a numpy array.

        Avoids the overhead of copying the token buffer into a Python list.
        """
        if allowed_special == "all":
            allowed_special = self.special_tokens_set
        if disallowed_special == "all":
            disallowed_special = self.special_tokens_set - allowed_special
        if disallowed_special:
            if not isinstance(disallowed_special, frozenset):
                disallowed_special = frozenset(disallowed_special)
            if match := _special_token_regex(disallowed_special).search(text):
                raise_disallowed_special_token(match.group())

        import numpy as np

        # Any disallowed special token has already raised above, so encoding against the
        # full special-token set is equivalent to encoding against `allowed_special`.
        buffer = self._core_bpe.encode_to_tiktoken_buffer(text, self.special_tokens_set)
        return np.frombuffer(buffer, dtype=np.uint32)

    def encode_ordinary_batch(self, text: list[str], *, num_threads: int = 8) -> list[list[int]]:
        """Encodes a list of strings into tokens, in parallel, ignoring special tokens.

        This is equivalent to `encode_batch(text, disallowed_special=())` (but slightly faster).

        ```
        >>> enc.encode_ordinary_batch(["hello world", "goodbye world"])
        [[31373, 995], [11274, 16390, 995]]
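        >>> # illustrative: equivalent to mapping encode_ordinary over the list, just parallelised
        >>> [enc.encode_ordinary(s) for s in ["hello world", "goodbye world"]]
        [[31373, 995], [11274, 16390, 995]]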
        ```
        """
        encoder = functools.partial(self.encode_ordinary)
        with ThreadPoolExecutor(num_threads) as e:
            return list(e.map(encoder, text))

    def encode_batch(
        self,
        text: list[str],
        *,
        num_threads: int = 8,
        allowed_special: Literal["all"] | AbstractSet[str] = set(),
        disallowed_special: Literal["all"] | Collection[str] = "all",
    ) -> list[list[int]]:
        """Encodes a list of strings into tokens, in parallel.

        See `encode` for more details on `allowed_special` and `disallowed_special`.

        ```
        >>> enc.encode_batch(["hello world", "goodbye world"])
        [[31373, 995], [11274, 16390, 995]]
        ```
        """
        if allowed_special == "all":
            allowed_special = self.special_tokens_set
        if disallowed_special == "all":
            disallowed_special = self.special_tokens_set - allowed_special
        if not isinstance(disallowed_special, frozenset):
            disallowed_special = frozenset(disallowed_special)

        encoder = functools.partial(
            self.encode, allowed_special=allowed_special, disallowed_special=disallowed_special
        )
        with ThreadPoolExecutor(num_threads) as e:
            return list(e.map(encoder, text))

    def encode_with_unstable(
        self,
        text: str,
        *,
        allowed_special: Literal["all"] | AbstractSet[str] = set(),
        disallowed_special: Literal["all"] | Collection[str] = "all",
    ) -> tuple[list[int], list[list[int]]]:
        """Encodes a string into stable tokens and possible completion sequences.

        Note that the stable tokens will only represent a substring of `text`.

        See `encode` for more details on `allowed_special` and `disallowed_special`.

        This API should itself be considered unstable.

        ```
        >>> enc.encode_with_unstable("hello fanta")
        ([31373], [(277, 4910), (5113, 265), ..., (8842,)])

        >>> text = "..."
        >>> stable_tokens, completions = enc.encode_with_unstable(text)
        >>> assert text.encode().startswith(enc.decode_bytes(stable_tokens))
        >>> assert all(enc.decode_bytes(stable_tokens + seq).startswith(text.encode()) for seq in completions)
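        # " fanta" may be the start of a longer word, so rather than committing to tokens
        # for it, candidate completion sequences for the unstable suffix are returned.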
        ```
        """
        if allowed_special == "all":
            allowed_special = self.special_tokens_set
        if disallowed_special == "all":
            disallowed_special = self.special_tokens_set - allowed_special
        if disallowed_special:
            if not isinstance(disallowed_special, frozenset):
                disallowed_special = frozenset(disallowed_special)
            if match := _special_token_regex(disallowed_special).search(text):
                raise_disallowed_special_token(match.group())

        return self._core_bpe.encode_with_unstable(text, allowed_special)

    def encode_single_token(self, text_or_bytes: str | bytes) -> int:
        """Encodes text corresponding to a single token to its token value.

        NOTE: this will encode all special tokens.

        Raises `KeyError` if the token is not in the vocabulary.

        ```
        >>> enc.encode_single_token("hello")
        31373
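        >>> # illustrative: bytes input is accepted as-is and maps to the same token
        >>> enc.encode_single_token(b"hello")
        31373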
        ```
        """
        if isinstance(text_or_bytes, str):
            text_or_bytes = text_or_bytes.encode("utf-8")
        return self._core_bpe.encode_single_token(text_or_bytes)

    def decode_bytes(self, tokens: Sequence[int]) -> bytes:
        """Decodes a list of tokens into bytes.

        ```
        >>> enc.decode_bytes([31373, 995])
        b'hello world'
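        >>> # illustrative: round-trips with encode_ordinary
        >>> enc.decode_bytes(enc.encode_ordinary("hello world"))
        b'hello world'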
        ```
        """
        return self._core_bpe.decode_bytes(tokens)

    def decode(self, tokens: Sequence[int], errors: str = "replace") -> str:
        """Decodes a list of tokens into a string.

        WARNING: the default behaviour of this function is lossy, since decoded bytes are not
        guaranteed to be valid UTF-8. You can control this behaviour using the `errors` parameter,
        for instance, setting `errors=strict`.

        ```
        >>> enc.decode([31373, 995])
        'hello world'
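        >>> # illustrative: the round-trip is exact when the token bytes are valid UTF-8
        >>> enc.decode(enc.encode("hello world"))
        'hello world'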
        ```
        """
        return self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)

    def decode_single_token_bytes(self, token: int) -> bytes:
        """Decodes a token into bytes.

        NOTE: this will decode all special tokens.

        Raises `KeyError` if the token is not in the vocabulary.

        ```
        >>> enc.decode_single_token_bytes(31373)
        b'hello'
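        >>> # illustrative: the leading space belongs to the token's byte representation
        >>> enc.decode_single_token_bytes(995)
        b' world'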
        ```
        """
        return self._core_bpe.decode_single_token_bytes(token)

    def decode_tokens_bytes(self, tokens: Sequence[int]) -> list[bytes]:
        """Decodes a list of tokens into a list of bytes.

        Useful for visualising tokenisation.
        >>> enc.decode_tokens_bytes([31373, 995])
        [b'hello', b' world']
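        >>> # illustrative: joining the pieces gives the same bytes as decode_bytes
        >>> b"".join(enc.decode_tokens_bytes([31373, 995]))
        b'hello world'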
        """
        return [self.decode_single_token_bytes(token) for token in tokens]

    def decode_with_offsets(self, tokens: Sequence[int]) -> tuple[str, list[int]]:
        """Decodes a list of tokens into a string and a list of offsets.

        Each offset is the index into text corresponding to the start of each token.
        If UTF-8 character boundaries do not line up with token boundaries, the offset is the index
        of the first character that contains bytes from the token.

        This will currently raise if given tokens that decode to invalid UTF-8; this behaviour may
        change in the future to be more permissive.

        >>> enc.decode_with_offsets([31373, 995])
        ('hello world', [0, 5])
        """
        token_bytes = self.decode_tokens_bytes(tokens)

        text_len = 0
        offsets = []
        for token in token_bytes:
            # A UTF-8 continuation byte (0x80 <= byte < 0xC0) means this token starts
            # mid-character, so point the offset at the character begun in the previous token.
            offsets.append(max(0, text_len - (0x80 <= token[0] < 0xC0)))
            # Count only leading bytes, i.e. the number of characters this token contributes.
            text_len += sum(1 for c in token if not 0x80 <= c < 0xC0)

        text = b"".join(token_bytes).decode("utf-8", errors="strict")
        return text, offsets

    def decode_batch(
        self, batch: Sequence[Sequence[int]], *, errors: str = "replace", num_threads: int = 8
    ) -> list[str]:
        """Decodes a batch (list of lists of tokens) into a list of strings."""
        decoder = functools.partial(self.decode, errors=errors)
        with ThreadPoolExecutor(num_threads) as e:
            return list(e.map(decoder, batch))

    def decode_bytes_batch(
        self, batch: Sequence[Sequence[int]], *, num_threads: int = 8
    ) -> list[bytes]:
        """Decodes a batch (list of lists of tokens) into a list of bytes."""
        with ThreadPoolExecutor(num_threads) as e:
            return list(e.map(self.decode_bytes, batch))

    def token_byte_values(self) -> list[bytes]:
        """Returns the list of all token byte values."""
        return self._core_bpe.token_byte_values()

    @property
    def eot_token(self) -> int:
        return self._special_tokens["<|endoftext|>"]

    @functools.cached_property
    def special_tokens_set(self) -> set[str]:
        return set(self._special_tokens.keys())

    @functools.cached_property
    def _special_token_values(self) -> frozenset[int]:
        return frozenset(self._special_tokens.values())

    def is_special_token(self, token: int) -> bool:
        assert isinstance(token, int)
        return token in self._special_token_values

    @property
    def n_vocab(self) -> int:
        """For backwards compatibility. Prefer to use `enc.max_token_value + 1`."""
        return self.max_token_value + 1

    def _encode_single_piece(self, text_or_bytes: str | bytes) -> list[int]:
        """Encodes text corresponding to bytes without a regex split.

        NOTE: this will not encode any special tokens.

        ```
        >>> enc.encode_single_piece("helloqqqq")
        [31373, 38227, 38227]
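        >>> # "qqqq" is not itself a single token, so it is split greedily into "qq" pieces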
        ```
        """
        if isinstance(text_or_bytes, str):
            text_or_bytes = text_or_bytes.encode("utf-8")
        return self._core_bpe.encode_single_piece(text_or_bytes)

    def _encode_only_native_bpe(self, text: str) -> list[int]:
        """Encodes a string into tokens, but do regex splitting in Python."""
        _unused_pat = regex.compile(self._pat_str)
        ret = []
        for piece in regex.findall(_unused_pat, text):
            ret.extend(self._core_bpe.encode_single_piece(piece))
        return ret

    def _encode_bytes(self, text: bytes) -> list[int]:
        return self._core_bpe._encode_bytes(text)

    def __getstate__(self) -> object:
        import tiktoken.registry

        # Pickle registered encodings by reference (just the name) when possible.
        if self is tiktoken.registry.ENCODINGS.get(self.name):
            return self.name
        return {
            "name": self.name,
            "pat_str": self._pat_str,
            "mergeable_ranks": self._mergeable_ranks,
            "special_tokens": self._special_tokens,
        }

    def __setstate__(self, value: object) -> None:
        import tiktoken

        # A string state refers to a registered encoding; otherwise rebuild from the dict.
        if isinstance(value, str):
            self.__dict__ = tiktoken.get_encoding(value).__dict__
            return
        self.__init__(**value)


@functools.lru_cache(maxsize=128)
def _special_token_regex(tokens: frozenset[str]) -> "regex.Pattern[str]":
    inner = "|".join(regex.escape(token) for token in tokens)
    return regex.compile(f"({inner})")


def raise_disallowed_special_token(token: str) -> NoReturn:
    raise ValueError(
        f"Encountered text corresponding to disallowed special token {token!r}.\n"
        f"If you want this text to be encoded as a special token, "
        f"pass it to `allowed_special`, e.g. `allowed_special={{{token!r}, ...}}`.\n"
        f"If you want this text to be encoded as normal text, disable the check for this token "
        f"by passing `disallowed_special=(enc.special_tokens_set - {{{token!r}}})`.\n"
        f"To disable this check for all special tokens, pass `disallowed_special=()`.\n"
    )
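

# Illustrative usage sketch: a minimal demo of the Encoding API above, assuming the
# standard "gpt2" encoding can be loaded through tiktoken's registry. Token values
# match the docstring examples in this file.
if __name__ == "__main__":
    import tiktoken

    enc = tiktoken.get_encoding("gpt2")

    # Plain-text round-trip: encode_ordinary never interprets special tokens.
    tokens = enc.encode_ordinary("hello world")  # [31373, 995]
    assert enc.decode(tokens) == "hello world"

    # Special tokens must be explicitly allowed, otherwise encode raises ValueError.
    assert enc.encode("<|endoftext|>", allowed_special="all") == [enc.eot_token]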