Update more libraries from 3.11 #5009

Merged · 11 commits · Jun 14, 2023
2 changes: 2 additions & 0 deletions Lib/heapq.py
@@ -12,6 +12,8 @@
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heappushpop(heap, item) # pushes a new item and then returns
# the smallest item; the heap size is unchanged
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged

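Reviewer note: the two lines added to the docstring describe operations that differ only in ordering — `heappushpop` pushes first and then pops, while `heapreplace` pops first and then pushes. A quick illustrative sketch (not part of the diff; the sample values are made up):

```python
import heapq

h1 = [1, 3, 5]  # already a valid heap
h2 = [1, 3, 5]

# heappushpop pushes the new item first, then pops the smallest:
# 0 is smaller than everything already in the heap, so it comes straight back.
print(heapq.heappushpop(h1, 0))  # -> 0; h1 is still [1, 3, 5]

# heapreplace pops the current smallest first, then pushes the new item:
# the old minimum 1 is returned even though 0 is smaller.
print(heapq.heapreplace(h2, 0))  # -> 1; h2 becomes [0, 3, 5]
```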
15 changes: 10 additions & 5 deletions Lib/imghdr.py
@@ -1,11 +1,14 @@
"""Recognize image file formats based on their first few bytes."""

from os import PathLike
import warnings

__all__ = ["what"]

# should replace using FileIO into file
from io import FileIO

warnings._deprecated(__name__, remove=(3, 13))


#-------------------------#
# Recognize image headers #
#-------------------------#
@@ -15,7 +18,7 @@ def what(file, h=None):
try:
if h is None:
if isinstance(file, (str, PathLike)):
f = FileIO(file, 'rb')
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
@@ -37,9 +40,11 @@ def what(file, h=None):
tests = []

def test_jpeg(h, f):
"""JPEG data in JFIF or Exif format"""
"""JPEG data with JFIF or Exif markers; and raw JPEG"""
if h[6:10] in (b'JFIF', b'Exif'):
return 'jpeg'
elif h[:4] == b'\xff\xd8\xff\xdb':
return 'jpeg'

tests.append(test_jpeg)

@@ -154,7 +159,7 @@ def testall(list, recursive, toplevel):
if recursive or toplevel:
print('recursing down:')
import glob
names = glob.glob(os.path.join(filename, '*'))
names = glob.glob(os.path.join(glob.escape(filename), '*'))
testall(names, recursive, 0)
else:
print('*** directory (use -r) ***')
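Reviewer note: besides switching `FileIO` to the builtin `open()` and escaping the directory name for `glob`, this change teaches `test_jpeg` to recognize raw JPEG data (SOI marker followed by a DQT segment) that carries no JFIF/Exif marker. A small sketch exercising the `h=` parameter so no file is needed (the header bytes are made up):

```python
import imghdr

# JFIF-marked JPEG headers were already recognized via h[6:10]:
print(imghdr.what(None, h=b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01'))  # -> 'jpeg'

# A raw JPEG header (no JFIF/Exif marker) only matches with the new
# h[:4] == b'\xff\xd8\xff\xdb' branch:
print(imghdr.what(None, h=b'\xff\xd8\xff\xdb' + b'\x00' * 28))  # -> 'jpeg'
```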
26 changes: 19 additions & 7 deletions Lib/ipaddress.py
@@ -132,7 +132,7 @@ def v4_int_to_packed(address):

"""
try:
return address.to_bytes(4, 'big')
return address.to_bytes(4) # big endian
except OverflowError:
raise ValueError("Address negative or too large for IPv4")

@@ -148,7 +148,7 @@ def v6_int_to_packed(address):

"""
try:
return address.to_bytes(16, 'big')
return address.to_bytes(16) # big endian
except OverflowError:
raise ValueError("Address negative or too large for IPv6")
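Reviewer note: dropping the explicit `'big'` argument in these two hunks is safe because, since Python 3.11, `int.to_bytes()` and `int.from_bytes()` default to `byteorder='big'`. A quick equivalence check (sample address made up):

```python
addr = 3232235777  # 192.168.1.1 as an integer

# Equivalent on Python 3.11+, where byteorder defaults to 'big':
assert addr.to_bytes(4, 'big') == addr.to_bytes(4) == b'\xc0\xa8\x01\x01'
assert int.from_bytes(b'\xc0\xa8\x01\x01') == addr
```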

@@ -1077,15 +1077,16 @@ def is_link_local(self):

@property
def is_private(self):
"""Test if this address is allocated for private networks.
"""Test if this network belongs to a private range.

Returns:
A boolean, True if the address is reserved per
A boolean, True if the network is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.

"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
return any(self.network_address in priv_network and
self.broadcast_address in priv_network
for priv_network in self._constants._private_networks)

@property
def is_global(self):
@@ -1122,6 +1123,15 @@ def is_loopback(self):
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)


class _BaseConstants:

_private_networks = []


_BaseNetwork._constants = _BaseConstants


class _BaseV4:

"""Base IPv4 object.
@@ -1294,7 +1304,7 @@ def __init__(self, address):
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
self._ip = int.from_bytes(address, 'big')
self._ip = int.from_bytes(address) # big endian
return

# Assume input argument to be string or any object representation
@@ -1561,6 +1571,7 @@ class _IPv4Constants:


IPv4Address._constants = _IPv4Constants
IPv4Network._constants = _IPv4Constants


class _BaseV6:
@@ -2285,3 +2296,4 @@ class _IPv6Constants:


IPv6Address._constants = _IPv6Constants
IPv6Network._constants = _IPv6Constants
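Reviewer note: the `is_private` rewrite above matters for networks whose two endpoints fall into different reserved blocks. With the old endpoint-only check, `0.0.0.0/0` counted as private because `0.0.0.0` (inside `0.0.0.0/8`) and `255.255.255.255` (inside `255.255.255.255/32`) are each private addresses; the new test requires a single network from `_constants._private_networks` to contain the whole range, which is why `IPv4Network`/`IPv6Network` now get their own `_constants` wiring. Expected behaviour after this change (a sketch, not a test from the PR):

```python
from ipaddress import ip_network

# Entirely inside 192.168.0.0/16, so still private:
assert ip_network('192.168.1.0/24').is_private

# Private endpoints, but no single private block contains the whole range:
assert not ip_network('0.0.0.0/0').is_private
```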
5 changes: 1 addition & 4 deletions Lib/linecache.py
@@ -7,10 +7,7 @@

import functools
import sys
try:
import os
except ImportError:
import _dummy_os as os
import os
import tokenize

__all__ = ["getline", "clearcache", "checkcache", "lazycache"]
131 changes: 90 additions & 41 deletions Lib/netrc.py
@@ -19,6 +19,50 @@ def __str__(self):
return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)


class _netrclex:
def __init__(self, fp):
self.lineno = 1
self.instream = fp
self.whitespace = "\n\t\r "
self.pushback = []

def _read_char(self):
ch = self.instream.read(1)
if ch == "\n":
self.lineno += 1
return ch

def get_token(self):
if self.pushback:
return self.pushback.pop(0)
token = ""
fiter = iter(self._read_char, "")
for ch in fiter:
if ch in self.whitespace:
continue
if ch == '"':
for ch in fiter:
if ch == '"':
return token
elif ch == "\\":
ch = self._read_char()
token += ch
else:
if ch == "\\":
ch = self._read_char()
token += ch
for ch in fiter:
if ch in self.whitespace:
return token
elif ch == "\\":
ch = self._read_char()
token += ch
return token

def push_token(self, token):
self.pushback.append(token)


class netrc:
def __init__(self, file=None):
default_netrc = file is None
@@ -34,9 +78,7 @@ def __init__(self, file=None):
self._parse(file, fp, default_netrc)

def _parse(self, file, fp, default_netrc):
lexer = shlex.shlex(fp)
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
lexer.commenters = lexer.commenters.replace('#', '')
lexer = _netrclex(fp)
while 1:
# Look for a machine, default, or macdef top-level keyword
saved_lineno = lexer.lineno
@@ -51,68 +93,75 @@ def _parse(self, file, fp, default_netrc):
entryname = lexer.get_token()
elif tt == 'default':
entryname = 'default'
elif tt == 'macdef': # Just skip to end of macdefs
elif tt == 'macdef':
entryname = lexer.get_token()
self.macros[entryname] = []
lexer.whitespace = ' \t'
while 1:
line = lexer.instream.readline()
if not line or line == '\012':
lexer.whitespace = ' \t\r\n'
if not line:
raise NetrcParseError(
"Macro definition missing null line terminator.",
file, lexer.lineno)
if line == '\n':
# a macro definition finished with consecutive new-line
# characters. The first \n is encountered by the
# readline() method and this is the second \n.
break
self.macros[entryname].append(line)
continue
else:
raise NetrcParseError(
"bad toplevel token %r" % tt, file, lexer.lineno)

if not entryname:
raise NetrcParseError("missing %r name" % tt, file, lexer.lineno)

# We're looking at start of an entry for a named machine or default.
login = ''
account = password = None
login = account = password = ''
self.hosts[entryname] = {}
while 1:
prev_lineno = lexer.lineno
tt = lexer.get_token()
if (tt.startswith('#') or
tt in {'', 'machine', 'default', 'macdef'}):
if password:
self.hosts[entryname] = (login, account, password)
lexer.push_token(tt)
break
else:
raise NetrcParseError(
"malformed %s entry %s terminated by %s"
% (toplevel, entryname, repr(tt)),
file, lexer.lineno)
if tt.startswith('#'):
if lexer.lineno == prev_lineno:
lexer.instream.readline()
continue
if tt in {'', 'machine', 'default', 'macdef'}:
self.hosts[entryname] = (login, account, password)
lexer.push_token(tt)
break
elif tt == 'login' or tt == 'user':
login = lexer.get_token()
elif tt == 'account':
account = lexer.get_token()
elif tt == 'password':
if os.name == 'posix' and default_netrc:
prop = os.fstat(fp.fileno())
if prop.st_uid != os.getuid():
import pwd
try:
fowner = pwd.getpwuid(prop.st_uid)[0]
except KeyError:
fowner = 'uid %s' % prop.st_uid
try:
user = pwd.getpwuid(os.getuid())[0]
except KeyError:
user = 'uid %s' % os.getuid()
raise NetrcParseError(
("~/.netrc file owner (%s) does not match"
" current user (%s)") % (fowner, user),
file, lexer.lineno)
if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
raise NetrcParseError(
"~/.netrc access too permissive: access"
" permissions must restrict access to only"
" the owner", file, lexer.lineno)
password = lexer.get_token()
else:
raise NetrcParseError("bad follower token %r" % tt,
file, lexer.lineno)
self._security_check(fp, default_netrc, self.hosts[entryname][0])

def _security_check(self, fp, default_netrc, login):
if os.name == 'posix' and default_netrc and login != "anonymous":
prop = os.fstat(fp.fileno())
if prop.st_uid != os.getuid():
import pwd
try:
fowner = pwd.getpwuid(prop.st_uid)[0]
except KeyError:
fowner = 'uid %s' % prop.st_uid
try:
user = pwd.getpwuid(os.getuid())[0]
except KeyError:
user = 'uid %s' % os.getuid()
raise NetrcParseError(
(f"~/.netrc file owner ({fowner}, {user}) does not match"
" current user"))
if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
raise NetrcParseError(
"~/.netrc access too permissive: access"
" permissions must restrict access to only"
" the owner")

def authenticators(self, host):
"""Return a (user, account, password) tuple for given host."""
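Reviewer note: the new `_netrclex` replaces the `shlex`-based tokenizer, strips the surrounding quotes from quoted tokens, and honours backslash escapes, while the file-permission check moves into `_security_check` and is skipped for anonymous logins. A rough usage sketch with a password containing a space (file name and credentials are made up; the permission check does not apply here because an explicit path is passed):

```python
import netrc
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".netrc", delete=False) as fh:
    fh.write('machine example.com login alice password "p@ss word"\n')
    path = fh.name

try:
    # The quoted token is returned without the quotes by the new lexer.
    print(netrc.netrc(path).authenticators("example.com"))
    # expected: ('alice', '', 'p@ss word')
finally:
    os.remove(path)
```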
3 changes: 3 additions & 0 deletions Lib/nntplib.py
@@ -68,6 +68,7 @@
import collections
import datetime
import sys
import warnings

try:
import ssl
@@ -85,6 +86,8 @@
"decode_header",
]

warnings._deprecated(__name__, remove=(3, 13))

# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
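Reviewer note: both `imghdr` and `nntplib` now call `warnings._deprecated(__name__, remove=(3, 13))` at module level, so the first import emits a `DeprecationWarning` announcing removal in 3.13. A sketch of how that surfaces (assumes the module has not been imported earlier in the process):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import nntplib  # noqa: F401  # module-level warnings._deprecated() fires here

print(caught[0].category.__name__)  # DeprecationWarning
print(caught[0].message)            # mentions removal in Python 3.13
```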